codeditor p1

This commit is contained in:
Niklas Peng 2025-05-30 19:48:30 +02:00
parent cf94545cad
commit c78e140265
4 changed files with 419 additions and 1 deletions

View File

@@ -1,3 +1,6 @@
"""Code Editor package.""" #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""AI-supported lightweight code editor package."""
__version__ = "0.1.0" __version__ = "0.1.0"

281
src/chat_manager.py Normal file
View File

@@ -0,0 +1,281 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Chat management module for the codeeditor application."""
import logging
import os
import re
from typing import Dict, List, Optional, Tuple
import anthropic
import openai
from dotenv import load_dotenv
from codeeditor.system_prompter import SystemPrompter
# Load environment variables from .env file so the os.getenv calls below see them.
load_dotenv()
# Module-level logger named after this module.
logger = logging.getLogger(__name__)
# Set up API keys (read once at import time; values are None when unset)
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY")
AI_PROVIDER = os.getenv("AI_PROVIDER", "anthropic").lower()  # Default to Anthropic
class ChatManager:
"""Class for managing chat interactions with the AI assistant."""
def __init__(self):
"""Initialize the ChatManager."""
self.system_prompter = SystemPrompter()
self.chat_history = []
self.current_file_path = None
self.current_file_content = None
# Initialize the appropriate client based on the provider
if AI_PROVIDER == "openai" and OPENAI_API_KEY:
self.provider = "openai"
self.model = os.getenv("OPENAI_MODEL", "gpt-4")
openai.api_key = OPENAI_API_KEY
logger.info("ChatManager initialized with OpenAI")
elif AI_PROVIDER == "anthropic" and ANTHROPIC_API_KEY:
self.provider = "anthropic"
self.model = os.getenv("ANTHROPIC_MODEL", "claude-3-opus-20240229")
self.client = anthropic.Anthropic(api_key=ANTHROPIC_API_KEY)
logger.info("ChatManager initialized with Anthropic")
else:
self.provider = "none"
logger.warning(
"No valid AI provider configured. Please set AI_PROVIDER and corresponding API key."
)
def send_message(
self, user_message: str, file_context: Optional[str] = None
) -> str:
"""Send a message to the AI assistant and get a response.
Args:
user_message: The user's message to send to the AI.
file_context: Optional context from the current file.
Returns:
The AI's response as a string.
"""
try:
# Parse file context if provided
if file_context:
(
self.current_file_path,
self.current_file_content,
) = self._parse_file_context(file_context)
# Check for special commands
if user_message.startswith("/"):
return self._handle_special_command(user_message)
# Generate system prompt with file context if provided
system_prompt = self.system_prompter.generate_prompt(file_context)
if self.provider == "openai":
return self._send_openai_message(system_prompt, user_message)
elif self.provider == "anthropic":
return self._send_anthropic_message(system_prompt, user_message)
else:
error_msg = (
"No valid AI provider configured. Please check your .env file."
)
logger.error(error_msg)
return f"Error: {error_msg}"
except Exception as e:
error_msg = f"Error communicating with AI assistant: {e}"
logger.error(error_msg)
return f"Error: {error_msg}"
def _parse_file_context(self, file_context: str) -> Tuple[str, str]:
"""Parse file context to extract file path and content.
Args:
file_context: File context string.
Returns:
Tuple of (file_path, file_content).
"""
lines = file_context.split("\n", 1)
file_path = lines[0].replace("File: ", "").strip()
file_content = lines[1] if len(lines) > 1 else ""
return file_path, file_content
def _handle_special_command(self, command: str) -> str:
"""Handle special commands starting with /.
Args:
command: The command string.
Returns:
Response to the command.
"""
command = command.strip().lower()
# Handle improve code command
if command.startswith("/improve") and self.current_file_content:
prompt = self.system_prompter.generate_code_improvement_prompt(
self.current_file_content, self.current_file_path
)
if self.provider == "openai":
return self._send_openai_message(prompt, "Please improve this code.")
elif self.provider == "anthropic":
return self._send_anthropic_message(prompt, "Please improve this code.")
# Handle explain code command
elif command.startswith("/explain") and self.current_file_content:
return self._send_message_with_context(
"Please explain how this code works in detail.",
f"File: {self.current_file_path}\n{self.current_file_content}",
)
# Handle help command
elif command.startswith("/help"):
return (
"Available commands:\n"
"- /improve - Suggest improvements for the current file\n"
"- /explain - Explain how the current file's code works\n"
"- /help - Show this help message\n"
"- /clear - Clear the chat history"
)
# Handle clear command
elif command.startswith("/clear"):
self.clear_history()
return "Chat history cleared."
return f"Unknown command: {command}. Type /help for available commands."
def _send_message_with_context(self, message: str, context: str) -> str:
"""Send a message with specific context.
Args:
message: The message to send.
context: The context to include.
Returns:
The AI's response.
"""
system_prompt = self.system_prompter.generate_prompt(context)
if self.provider == "openai":
return self._send_openai_message(system_prompt, message)
elif self.provider == "anthropic":
return self._send_anthropic_message(system_prompt, message)
else:
return "No AI provider configured."
def _send_openai_message(self, system_prompt: str, user_message: str) -> str:
"""Send a message using OpenAI API.
Args:
system_prompt: The system prompt to guide the AI.
user_message: The user's message.
Returns:
The AI's response as a string.
"""
# Prepare messages for the API call
messages = [{"role": "system", "content": system_prompt}]
# Add chat history
for msg in self.chat_history:
messages.append(msg)
# Add the new user message
messages.append({"role": "user", "content": user_message})
# Call the OpenAI API
response = openai.chat.completions.create(
model=self.model,
messages=messages,
temperature=0.7,
max_tokens=2000,
)
# Extract the assistant's message
assistant_message = response.choices[0].message.content
# Update chat history
self.chat_history.append({"role": "user", "content": user_message})
self.chat_history.append({"role": "assistant", "content": assistant_message})
logger.info("Received response from OpenAI assistant")
return assistant_message
def _send_anthropic_message(self, system_prompt: str, user_message: str) -> str:
"""Send a message using Anthropic API.
Args:
system_prompt: The system prompt to guide the AI.
user_message: The user's message.
Returns:
The AI's response as a string.
"""
# Prepare messages for the API call
messages = []
# Add chat history
for msg in self.chat_history:
role = msg["role"]
content = msg["content"]
if role == "user":
messages.append({"role": "user", "content": content})
elif role == "assistant":
messages.append({"role": "assistant", "content": content})
# Add the new user message
messages.append({"role": "user", "content": user_message})
# Call the Anthropic API
response = self.client.messages.create(
model=self.model,
system=system_prompt,
messages=messages,
max_tokens=2000,
temperature=0.7,
)
# Extract the assistant's message
assistant_message = response.content[0].text
# Update chat history
self.chat_history.append({"role": "user", "content": user_message})
self.chat_history.append({"role": "assistant", "content": assistant_message})
logger.info("Received response from Anthropic assistant")
return assistant_message
def clear_history(self):
"""Clear the chat history."""
self.chat_history = []
logger.info("Chat history cleared")
def get_history(self) -> List[Dict[str, str]]:
"""Get the chat history.
Returns:
The chat history as a list of dictionaries.
"""
return self.chat_history
def set_current_file(self, file_path: str, file_content: str):
"""Set the current file being edited.
Args:
file_path: Path to the current file.
file_content: Content of the current file.
"""
self.current_file_path = file_path
self.current_file_content = file_content

15
src/env_example Normal file
View File

@@ -0,0 +1,15 @@
# AI Provider Configuration
# Set to "openai" or "anthropic"
AI_PROVIDER=anthropic
# OpenAI API Configuration (if using OpenAI) - not implemented!
OPENAI_API_KEY=your_openai_api_key_here
OPENAI_MODEL=gpt-4
# Anthropic API Configuration (if using Anthropic)
ANTHROPIC_API_KEY=your_anthropic_api_key_here
ANTHROPIC_MODEL=claude-3-opus-20240229
# Search API Configuration (Google Custom Search)
SEARCH_API_KEY=your_google_api_key_here
SEARCH_ENGINE_ID=your_search_engine_id_here

119
src/execution_engine.py Normal file
View File

@@ -0,0 +1,119 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Code execution module for the codeeditor application."""
import io
import logging
import subprocess
import sys
import tempfile
import traceback
from typing import Dict, Tuple
# Module-level logger named after this module.
logger = logging.getLogger(__name__)
class ExecutionEngine:
    """Execute Python code snippets and capture their output."""

    def __init__(self):
        """Initialize the ExecutionEngine."""
        logger.info("ExecutionEngine initialized")

    def run_code(self, code: str) -> Dict[str, str]:
        """Run Python code in a subprocess and capture the output.

        Args:
            code: The Python code to execute.

        Returns:
            A dictionary containing 'output', 'error', and 'status'
            ('success', 'error' or 'timeout').
        """
        import os  # local import: only needed for temp-file cleanup

        result = {"output": "", "error": "", "status": "success"}
        temp_file_path = None
        try:
            # Write the code to a temporary file so the interpreter can run
            # it as a normal script.
            with tempfile.NamedTemporaryFile(
                suffix=".py", delete=False, mode="w"
            ) as temp_file:
                temp_file.write(code)
                temp_file_path = temp_file.name

            # subprocess.run kills the child itself when the timeout expires
            # (the previous Popen.communicate left the process running).
            completed = subprocess.run(
                [sys.executable, temp_file_path],
                capture_output=True,
                text=True,
                timeout=10,  # 10 second timeout
            )
            result["output"] = completed.stdout
            if completed.stderr:
                result["error"] = completed.stderr
                result["status"] = "error"
            logger.info("Code execution completed with status: %s", result["status"])
        except subprocess.TimeoutExpired:
            result["error"] = "Execution timed out after 10 seconds"
            result["status"] = "timeout"
            logger.warning("Code execution timed out")
        except Exception as e:
            result["error"] = f"Error executing code: {str(e)}"
            result["status"] = "error"
            logger.error("Error executing code: %s", e)
        finally:
            # Fix: the file was created with delete=False and was never
            # removed, leaking one temp file per execution.
            if temp_file_path is not None:
                try:
                    os.unlink(temp_file_path)
                except OSError:
                    pass
        return result

    def run_code_in_memory(self, code: str) -> Dict[str, str]:
        """Run Python code in-process and capture the output.

        This method avoids writing to disk, which suits small snippets, but
        the code runs inside this interpreter with no isolation.

        Args:
            code: The Python code to execute.

        Returns:
            A dictionary containing 'output', 'error', and 'status'.
        """
        result = {"output": "", "error": "", "status": "success"}

        # Temporarily swap the process-wide stdout/stderr for StringIO
        # buffers; restored unconditionally in the finally block.
        old_stdout = sys.stdout
        old_stderr = sys.stderr
        redirected_output = io.StringIO()
        redirected_error = io.StringIO()
        sys.stdout = redirected_output
        sys.stderr = redirected_error

        try:
            # SECURITY NOTE: exec runs arbitrary code with full interpreter
            # privileges — only feed it trusted, user-authored snippets.
            exec(code)

            result["output"] = redirected_output.getvalue()
            error_output = redirected_error.getvalue()
            if error_output:
                result["error"] = error_output
                result["status"] = "error"
            logger.info(
                "In-memory code execution completed with status: %s", result["status"]
            )
        except Exception as e:
            result["error"] = f"{str(e)}\n{traceback.format_exc()}"
            result["status"] = "error"
            logger.error("Error executing code in memory: %s", e)
        finally:
            # Restore the real stdout/stderr even if exec raised.
            sys.stdout = old_stdout
            sys.stderr = old_stderr
        return result