Upload 13 files
Browse files- .env +7 -0
- Dockerfile +20 -0
- app.py +273 -0
- logs/classifier_responses.txt +0 -0
- logs/generator_direct_responses.txt +0 -0
- logs/generator_interpreted_responses.txt +0 -0
- logs/instructTuned_enhance_direct_responses.txt +0 -0
- logs/instructTuned_enhance_interpreted_responses.txt +0 -0
- logs/instructTuned_responses.txt +0 -0
- logs/interpreter_responses.txt +0 -0
- logs/reviewer_compare_responses.txt +0 -0
- logs/reviewer_polish_responses.txt +0 -0
- requirements.txt +5 -0
.env
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# OpenRouter API Key (Get it from https://openrouter.ai/)
|
2 |
+
# SECURITY: a live API key was committed here — rotate it immediately and never commit real keys
OPENROUTER_API_KEY=your_openrouter_api_key_here
|
3 |
+
|
4 |
+
# App Configuration (Optional)
|
5 |
+
BASE_URL=https://openrouter.ai/api/v1
|
6 |
+
APP_NAME=Code-Generation-Agent
|
7 |
+
HTTP_REFERER=https://conv-code-generation-agent.vercel.app
|
Dockerfile
ADDED
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Use the official Python image from the Docker Hub
FROM python:3.9

# Set the working directory in the container
WORKDIR /app

# Copy the requirements.txt file into the container first, so the dependency
# layer is cached across code-only rebuilds
COPY requirements.txt .

# Install the dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Copy the rest of the application code into the container
# NOTE(review): this also copies .env and logs/ into the image layers —
# consider adding a .dockerignore so secrets never end up in the image
COPY . .

# Expose the port that the app will run on
EXPOSE 7860

# Set the command to run the FastAPI app with Uvicorn
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
|
app.py
ADDED
@@ -0,0 +1,273 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from fastapi import FastAPI, HTTPException, Query
|
2 |
+
import httpx
|
3 |
+
import os
|
4 |
+
import re
|
5 |
+
from typing import Dict
|
6 |
+
from pydantic import BaseModel
|
7 |
+
from dotenv import load_dotenv
|
8 |
+
|
9 |
+
# Load environment variables from .env file
load_dotenv()

app = FastAPI()

# Define the models to use for each step with appropriate OpenRouter model IDs.
# Each pipeline stage (classify -> interpret -> generate -> enhance -> review)
# is routed to a different free-tier model.
MODELS = {
    "classifier": "mistralai/mistral-7b-instruct:free",  # For classifying query complexity
    "interpreter": "google/gemini-2.0-flash-thinking-exp:free",  # For understanding complex queries
    "generator": "qwen/qwen2.5-vl-32b-instruct:free",  # For code generation
    "instructTuned": "mistralai/mistral-7b-instruct:free",  # For customization and enhancement
    "reviewer": "deepseek/deepseek-r1:free",  # For final review
}

# Get API key from environment variables
OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")
if not OPENROUTER_API_KEY:
    # Warn early but keep booting: each OpenRouter call will fail per-request instead
    print("WARNING: OPENROUTER_API_KEY not found in environment variables!")
27 |
+
|
28 |
+
# Response models
class SimpleResponse(BaseModel):
    """Response for a query classified as "simple": generated code plus step trace."""
    code: str  # final generated code returned to the caller
    steps: Dict[str, str]  # intermediate outputs keyed by step name (here just "directCode")
32 |
+
|
33 |
+
class AmbiguousResponse(BaseModel):
    """Response when the query is too vague to generate code for."""
    code: str  # user-facing message asking for more detail
    feedback: str  # raw feedback text from the interpreter model
    ambiguous: bool = True  # marker so clients can detect this response shape
37 |
+
|
38 |
+
class ComplexResponse(BaseModel):
    """Response for a query classified as "complex": polished code plus every pipeline step."""
    code: str  # final reviewed/polished code
    steps: Dict[str, str]  # all intermediate pipeline outputs, keyed by step name
|
41 |
+
|
42 |
+
@app.get("/generate")
async def generate_code(query: str = Query(..., description="The programming query to process")):
    """Classify the query's complexity and route it to the matching handler.

    Returns a SimpleResponse, ComplexResponse, or AmbiguousResponse depending
    on how the classifier model labels the query.

    Raises:
        HTTPException 400 if the query is empty, 500 on unexpected failures.
    """
    try:
        if not query:
            raise HTTPException(status_code=400, detail="Query is required")

        print("Processing query:", query)

        # STEP 1: Query Classification - Determine complexity
        print("Classifying query complexity...")
        complexity_analysis = await call_openrouter(
            MODELS["classifier"],
            # BUG FIX: the closing quote after {query} was being swallowed by the
            # triple-quote delimiter; escape it so the prompt quotes the query.
            f"""Analyze this coding request and classify it as "simple" or "complex" or "ambiguous".
- Simple: Basic requests like "Write a Python function to reverse a string."
- Complex: Requests involving customization, optimization, or multiple features.
- Ambiguous: Unclear requests lacking specifics.
Respond with ONLY the word "simple", "complex", or "ambiguous".
Query: "{query}\""""
        )

        # Strip punctuation/whitespace/casing so a chatty model reply still matches
        query_type = re.sub(r'[^a-z]', '', complexity_analysis.strip().lower())
        print("Query classified as:", query_type)

        # Route by classification; any unknown label falls through to the complex pipeline
        if "ambiguous" in query_type:
            return await handle_ambiguous_query(query)
        elif "simple" in query_type:
            return await handle_simple_query(query)
        else:
            return await handle_complex_query(query)

    except HTTPException:
        # BUG FIX: the broad handler below was catching the deliberate 400 above
        # and re-raising it as a 500. Let HTTP errors propagate unchanged.
        raise
    except Exception as error:
        print("Error in code generation:", error)
        raise HTTPException(status_code=500, detail=f"Failed to generate code: {str(error)}")
|
76 |
+
|
77 |
+
async def handle_ambiguous_query(query: str):
    """Ask the interpreter model what details are missing and wrap that feedback."""
    print("Processing ambiguous query...")

    clarification_prompt = f"""You are a coding assistant. This query is ambiguous or lacks specifics: "{query}".
Please provide helpful feedback on what details are needed to generate the code.
Be specific about what information is missing (e.g., programming language, input/output format, etc.)."""

    feedback = await call_openrouter(MODELS["interpreter"], clarification_prompt)

    print("Ambiguous query feedback:", feedback)

    # Surface the feedback itself in the `code` field so clients that only
    # render `code` still show the clarification request.
    message = f" {feedback}\n\nPlease provide more details so I can generate the appropriate code for you."
    return AmbiguousResponse(code=message, feedback=feedback)
|
92 |
+
|
93 |
+
async def handle_simple_query(query: str):
    """Answer a simple request with a single instruction-tuned model call."""
    print("Processing simple query with single model...")

    generation_prompt = f"""You are a programming assistant. Generate clean, well-commented, production-ready code for this request: "{query}".
Include proper error handling and follow best practices for the chosen programming language.
Focus on writing efficient, readable code that directly addresses the request."""

    code = await call_openrouter(MODELS["instructTuned"], generation_prompt)

    print(f"Simple query result: {code[:100]}...")

    # Single-step pipeline: the direct output is both the answer and the trace
    return SimpleResponse(code=code, steps={"directCode": code})
|
109 |
+
|
110 |
+
async def handle_complex_query(query: str):
    """Run the full multi-model pipeline for a complex request.

    Pipeline: interpret the query, generate + enhance code along two parallel
    paths (direct vs. interpretation-driven), have the reviewer model pick the
    better implementation, then polish it. On any pipeline failure this
    degrades to the single-model simple handler rather than erroring out.
    """
    # BUG FIX: `import asyncio` was buried mid-function inside the try-block;
    # hoist it so it is in scope before any of the pipeline runs.
    import asyncio

    print("Processing complex query with full pipeline...")

    # STEP 1: Get detailed interpretation
    print("Getting detailed interpretation...")
    interpretation = await call_openrouter(
        MODELS["interpreter"],
        # BUG FIX: the closing quote after {query} was swallowed by the closing
        # triple-quote delimiter; escape it so the prompt quotes the query.
        f"""You are a programming assistant. Understand this coding request and convert it into a clear, detailed specification: "{query}\""""
    )

    print(f"Interpretation complete: {interpretation[:150]}...")

    # Check if interpretation reveals ambiguity
    if any(word in interpretation.lower() for word in ["unclear", "ambiguous", "need more information"]):
        print("Interpretation indicates ambiguity, redirecting to ambiguous handler")
        return await handle_ambiguous_query(query)

    # STEP 2-4: Run the remaining steps in parallel paths for efficiency
    print("Running parallel processing paths...")
    try:
        # PATH 1: Direct code generation with enhancement
        async def direct_code_path():
            print("PATH 1: Starting direct code generation")
            # Generate code directly from the raw query
            direct_code = await call_openrouter(
                MODELS["generator"],
                f"""You are a code generation expert. Generate clean, well-commented, production-ready code for: "{query}".
Include proper error handling and follow best practices."""
            )

            print("PATH 1: Starting code enhancement")
            # Enhance direct code
            enhanced_direct_code = await call_openrouter(
                MODELS["instructTuned"],
                f"""Improve this code for better performance and readability. Apply any specific customizations
mentioned in this request: "{query}"\n\nCode:\n{direct_code}"""
            )

            print("PATH 1: Complete")
            return {"directCode": direct_code, "enhancedDirectCode": enhanced_direct_code}

        # PATH 2: Interpreted code generation with enhancement
        async def interpreted_code_path():
            print("PATH 2: Starting interpreted code generation")
            # Generate code from the detailed specification
            interpreted_code = await call_openrouter(
                MODELS["generator"],
                # BUG FIX: same swallowed-quote defect as the interpretation prompt.
                f"""Generate clean, well-commented code based on this specification: "{interpretation}\""""
            )

            print("PATH 2: Starting code enhancement")
            # Enhance interpreted code
            enhanced_interpreted_code = await call_openrouter(
                MODELS["instructTuned"],
                f"""Improve this code for better performance, readability, and error handling:\n\n{interpreted_code}"""
            )

            print("PATH 2: Complete")
            return {"interpretedCode": interpreted_code, "enhancedInterpretedCode": enhanced_interpreted_code}

        # Run both paths concurrently; each makes two sequential model calls
        direct_path_result, interpreted_path_result = await asyncio.gather(
            direct_code_path(),
            interpreted_code_path()
        )

        # STEP 5: Select the best implementation
        print("Selecting best implementation...")
        selection_prompt = f"""You are a code selection expert. Choose the better implementation that is more correct, efficient, and readable.
IMPLEMENTATION 1:\n{direct_path_result['enhancedDirectCode']}\n\n
IMPLEMENTATION 2:\n{interpreted_path_result['enhancedInterpretedCode']}\n\n
Respond with ONLY the complete selected implementation, no explanation needed."""

        best_implementation = await call_openrouter(MODELS["reviewer"], selection_prompt)
        print("Best implementation selected.")

        # STEP 6: Final review and polishing
        print("Performing final code review...")
        final_response = await call_openrouter(
            MODELS["reviewer"],
            f"""Review this code and ensure it meets industry best practices, security standards, and handles errors properly.
Add thorough comments explaining the key components and any optimizations you've made.
Provide the final, improved version:\n\n{best_implementation}"""
        )

        print("Final response generated. Process complete.")

        # Combine all steps for the response
        steps = {
            "interpretation": interpretation,
            "directCode": direct_path_result["directCode"],
            "interpretedCode": interpreted_path_result["interpretedCode"],
            "enhancedDirectCode": direct_path_result["enhancedDirectCode"],
            "enhancedInterpretedCode": interpreted_path_result["enhancedInterpretedCode"],
            "bestImplementation": best_implementation
        }

        return ComplexResponse(
            code=final_response,
            steps=steps
        )
    except Exception as error:
        print("Error in complex query processing:", error)

        # Deliberate best-effort fallback: degrade to the single-model path
        # instead of surfacing a 500 to the client.
        print("Falling back to simple query handling...")
        return await handle_simple_query(query)
|
217 |
+
|
218 |
+
async def call_openrouter(model: str, input: str):
    """Send a single-user-message chat completion to OpenRouter and return the reply text.

    On any failure (network error, bad payload, unexpected schema) this returns a
    human-readable error string instead of raising, so pipeline callers keep going.

    NOTE: the parameter name `input` shadows the builtin; it is kept unchanged
    for backward compatibility with any keyword callers.
    """
    try:
        print(f"Calling OpenRouter with model: {model}")

        async with httpx.AsyncClient(timeout=60.0) as client:
            response = await client.post(
                "https://openrouter.ai/api/v1/chat/completions",
                json={
                    "model": model,
                    "messages": [{"role": "user", "content": input}],
                    "temperature": 0.5,
                    "max_tokens": 9999,
                },
                headers={
                    "Authorization": f"Bearer {OPENROUTER_API_KEY}",
                    "HTTP-Referer": "https://conv-code-generation-agent.vercel.app",
                    "X-Title": "Code Generation Agent"
                },
            )

            response_data = response.json()
            if response_data and "choices" in response_data and response_data["choices"]:
                content = response_data["choices"][0]["message"]["content"]

                # Log model response with clear separation
                print(f"\n========== RESPONSE FROM {model} ==========")
                print(content[:500] + ('...' if len(content) > 500 else ''))
                print('==========================================\n')

                return content
            else:
                print("Unexpected response format:", response_data)
                return "Unexpected API response format"
    except Exception as error:
        # BUG FIX: the old handler did getattr(error, "response", {}).get("data", ...),
        # but httpx attaches a Response object (which has no .get), so a real HTTP
        # error raised a secondary AttributeError that escaped this function.
        # Extract the response body text safely instead.
        err_response = getattr(error, "response", None)
        error_message = err_response.text if err_response is not None else str(error)
        print(f"Error calling OpenRouter with model {model}:", error_message)
        # Return error message instead of throwing
        return f"Failed to generate with {model}. Error: {str(error)}"
|
256 |
+
|
257 |
+
# For documentation and browser testing
@app.get("/")
async def root():
    """Landing endpoint describing how to call the API."""
    usage_info = {
        "message": "Code Generation API",
        "usage": "Use GET /generate?query=your programming question",
        "example": "/generate?query=Write a Python function to reverse a string"
    }
    return usage_info
|
265 |
+
|
266 |
+
# For running the app locally
if __name__ == "__main__":
    if not OPENROUTER_API_KEY:
        print("ERROR: OPENROUTER_API_KEY must be set in .env file or environment variables")
        print("Create a .env file with: OPENROUTER_API_KEY=your_key_here")
        # BUG FIX: exit() is injected by the `site` module and can be absent
        # (e.g. under `python -S`); SystemExit is always available.
        raise SystemExit(1)
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)
|
logs/classifier_responses.txt
ADDED
File without changes
|
logs/generator_direct_responses.txt
ADDED
File without changes
|
logs/generator_interpreted_responses.txt
ADDED
File without changes
|
logs/instructTuned_enhance_direct_responses.txt
ADDED
File without changes
|
logs/instructTuned_enhance_interpreted_responses.txt
ADDED
File without changes
|
logs/instructTuned_responses.txt
ADDED
File without changes
|
logs/interpreter_responses.txt
ADDED
File without changes
|
logs/reviewer_compare_responses.txt
ADDED
File without changes
|
logs/reviewer_polish_responses.txt
ADDED
File without changes
|
requirements.txt
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
fastapi
|
2 |
+
httpx
|
3 |
+
pydantic
|
4 |
+
python-dotenv
|
5 |
+
uvicorn
|