"""Clipboard-to-LLM helper.

Reads the current clipboard text, sends it as a prompt to a local Ollama
server through its OpenAI-compatible endpoint, and copies the model's
reply back to the clipboard.
"""

import sys

import openai
import pyperclip

# ---------------------------
# OpenAI Client Configuration
# ---------------------------
# Points at a local Ollama instance; the api_key is required by the client
# library but ignored by the server.
client = openai.OpenAI(
    base_url='http://localhost:11434/v1/',  # Required but ignored
    api_key='ollama',
)


def get_completion(prompt, model="llama3.2"):
    """
    Sends a prompt to the OpenAI API and returns the complete response.

    Args:
        prompt (str): The prompt to send to the model.
        model (str): The model to use for completion.

    Returns:
        str: The complete response from the model.

    Exits:
        Terminates the process with status 1 if the API call fails.
    """
    # Prepend instruction to the prompt
    formatted_prompt = "Answer the following Question or explain the key points of the following topic, in a few sentences or less:\n" + prompt
    try:
        response = client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": formatted_prompt}],
            temperature=0.5,
            stream=False,  # Disable streaming
        )
    except openai.OpenAIError as e:
        # Fail fast: without a model response the tool has nothing to do.
        # Report on stderr so the message is not mistaken for output.
        print(f"Error during OpenAI API call: {e}", file=sys.stderr)
        sys.exit(1)
    # message.content may be None (e.g. tool-call responses); treat as empty.
    content = response.choices[0].message.content or ""
    return content.strip()


def process_clipboard():
    """
    Reads the clipboard content, sends it to the LLM, receives the response,
    and copies it back to the clipboard.
    """
    try:
        # 1. Read current clipboard content.
        # pyperclip.paste() can return None on some platforms; normalize to "".
        clipboard_text = pyperclip.paste() or ""
    except pyperclip.PyperclipException as e:
        print(f"An error occurred: {e}", file=sys.stderr)
        return

    print(f"Clipboard content read: {clipboard_text}")

    if not clipboard_text.strip():
        print("Clipboard is empty. Exiting.")
        return

    # 2. Send clipboard content to OpenAI API without streaming
    print("Sending prompt to LLM and awaiting response...\n")
    response = get_completion(clipboard_text)
    print(f"Received response: {response}\n")

    if response:
        # 3. Copy the response back to clipboard
        try:
            pyperclip.copy(response)
        except pyperclip.PyperclipException as e:
            print(f"An error occurred: {e}", file=sys.stderr)
            return
        print("Response copied to clipboard.")
    else:
        print("No response received from the LLM.")


def main():
    """
    Main function to initiate clipboard processing.
    """
    process_clipboard()


if __name__ == "__main__":
    main()