TenduL committed · verified
Commit e38b641 · 1 Parent(s): 835e259

Update app.py

Files changed (1)
  1. app.py +190 -273
app.py CHANGED
@@ -1,273 +1,190 @@
- from fastapi import FastAPI, HTTPException, Query
- import httpx
- import os
- import re
- from typing import Dict
- from pydantic import BaseModel
- from dotenv import load_dotenv
-
- # Load environment variables from .env file
- load_dotenv()
-
- app = FastAPI()
-
- # Define the models to use for each step with appropriate OpenRouter model IDs
- MODELS = {
-     "classifier": "mistralai/mistral-7b-instruct:free", # For classifying query complexity
-     "interpreter": "google/gemini-2.0-flash-thinking-exp:free", # For understanding complex queries
-     "generator": "qwen/qwen2.5-vl-32b-instruct:free", # For code generation
-     "instructTuned": "mistralai/mistral-7b-instruct:free", # For customization and enhancement
-     "reviewer": "deepseek/deepseek-r1:free", # For final review
- }
-
- # Get API key from environment variables
- OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")
- if not OPENROUTER_API_KEY:
-     print("WARNING: OPENROUTER_API_KEY not found in environment variables!")
-
- # Response models
- class SimpleResponse(BaseModel):
-     code: str
-     steps: Dict[str, str]
-
- class AmbiguousResponse(BaseModel):
-     code: str
-     feedback: str
-     ambiguous: bool = True
-
- class ComplexResponse(BaseModel):
-     code: str
-     steps: Dict[str, str]
-
- @app.get("/generate")
- async def generate_code(query: str = Query(..., description="The programming query to process")):
-     try:
-         if not query:
-             raise HTTPException(status_code=400, detail="Query is required")
-
-         print("Processing query:", query)
-
-         # STEP 1: Query Classification - Determine complexity
-         print("Classifying query complexity...")
-         complexity_analysis = await call_openrouter(
-             MODELS["classifier"],
-             f"""Analyze this coding request and classify it as "simple" or "complex" or "ambiguous".
-             - Simple: Basic requests like "Write a Python function to reverse a string."
-             - Complex: Requests involving customization, optimization, or multiple features.
-             - Ambiguous: Unclear requests lacking specifics.
-             Respond with ONLY the word "simple", "complex", or "ambiguous".
-             Query: "{query}"""
-         )
-
-         query_type = re.sub(r'[^a-z]', '', complexity_analysis.strip().lower())
-         print("Query classified as:", query_type)
-
-         # Process based on complexity
-         if "ambiguous" in query_type:
-             return await handle_ambiguous_query(query)
-         elif "simple" in query_type:
-             return await handle_simple_query(query)
-         else:
-             return await handle_complex_query(query)
-
-     except Exception as error:
-         print("Error in code generation:", error)
-         raise HTTPException(status_code=500, detail=f"Failed to generate code: {str(error)}")
-
- async def handle_ambiguous_query(query: str):
-     print("Processing ambiguous query...")
-     feedback = await call_openrouter(
-         MODELS["interpreter"],
-         f"""You are a coding assistant. This query is ambiguous or lacks specifics: "{query}".
-         Please provide helpful feedback on what details are needed to generate the code.
-         Be specific about what information is missing (e.g., programming language, input/output format, etc.)."""
-     )
-
-     print("Ambiguous query feedback:", feedback)
-
-     return AmbiguousResponse(
-         code=f" {feedback}\n\nPlease provide more details so I can generate the appropriate code for you.",
-         feedback=feedback
-     )
-
- async def handle_simple_query(query: str):
-     print("Processing simple query with single model...")
-
-     code = await call_openrouter(
-         MODELS["instructTuned"],
-         f"""You are a programming assistant. Generate clean, well-commented, production-ready code for this request: "{query}".
-         Include proper error handling and follow best practices for the chosen programming language.
-         Focus on writing efficient, readable code that directly addresses the request."""
-     )
-
-     print(f"Simple query result: {code[:100]}...")
-
-     return SimpleResponse(
-         code=code,
-         steps={"directCode": code}
-     )
-
- async def handle_complex_query(query: str):
-     print("Processing complex query with full pipeline...")
-
-     # STEP 1: Get detailed interpretation
-     print("Getting detailed interpretation...")
-     interpretation = await call_openrouter(
-         MODELS["interpreter"],
-         f"""You are a programming assistant. Understand this coding request and convert it into a clear, detailed specification: "{query}"""
-     )
-
-     print(f"Interpretation complete: {interpretation[:150]}...")
-
-     # Check if interpretation reveals ambiguity
-     if any(word in interpretation.lower() for word in ["unclear", "ambiguous", "need more information"]):
-         print("Interpretation indicates ambiguity, redirecting to ambiguous handler")
-         return await handle_ambiguous_query(query)
-
-     # STEP 2-4: Run the remaining steps in parallel paths for efficiency
-     print("Running parallel processing paths...")
-     try:
-         # PATH 1: Direct code generation with enhancement
-         async def direct_code_path():
-             print("PATH 1: Starting direct code generation")
-             # Generate code directly from query
-             direct_code = await call_openrouter(
-                 MODELS["generator"],
-                 f"""You are a code generation expert. Generate clean, well-commented, production-ready code for: "{query}".
-                 Include proper error handling and follow best practices."""
-             )
-
-             print("PATH 1: Starting code enhancement")
-             # Enhance direct code
-             enhanced_direct_code = await call_openrouter(
-                 MODELS["instructTuned"],
-                 f"""Improve this code for better performance and readability. Apply any specific customizations
-                 mentioned in this request: "{query}"\n\nCode:\n{direct_code}"""
-             )
-
-             print("PATH 1: Complete")
-             return {"directCode": direct_code, "enhancedDirectCode": enhanced_direct_code}
-
-         # PATH 2: Interpreted code generation with enhancement
-         async def interpreted_code_path():
-             print("PATH 2: Starting interpreted code generation")
-             # Generate code from interpretation
-             interpreted_code = await call_openrouter(
-                 MODELS["generator"],
-                 f"""Generate clean, well-commented code based on this specification: "{interpretation}"""
-             )
-
-             print("PATH 2: Starting code enhancement")
-             # Enhance interpreted code
-             enhanced_interpreted_code = await call_openrouter(
-                 MODELS["instructTuned"],
-                 f"""Improve this code for better performance, readability, and error handling:\n\n{interpreted_code}"""
-             )
-
-             print("PATH 2: Complete")
-             return {"interpretedCode": interpreted_code, "enhancedInterpretedCode": enhanced_interpreted_code}
-
-         import asyncio
-         direct_path_result, interpreted_path_result = await asyncio.gather(
-             direct_code_path(),
-             interpreted_code_path()
-         )
-
-         # STEP 5: Select the best implementation
-         print("Selecting best implementation...")
-         selection_prompt = f"""You are a code selection expert. Choose the better implementation that is more correct, efficient, and readable.
-         IMPLEMENTATION 1:\n{direct_path_result['enhancedDirectCode']}\n\n
-         IMPLEMENTATION 2:\n{interpreted_path_result['enhancedInterpretedCode']}\n\n
-         Respond with ONLY the complete selected implementation, no explanation needed."""
-
-         best_implementation = await call_openrouter(MODELS["reviewer"], selection_prompt)
-         print("Best implementation selected.")
-
-         # STEP 6: Final review and polishing
-         print("Performing final code review...")
-         final_response = await call_openrouter(
-             MODELS["reviewer"],
-             f"""Review this code and ensure it meets industry best practices, security standards, and handles errors properly.
-             Add thorough comments explaining the key components and any optimizations you've made.
-             Provide the final, improved version:\n\n{best_implementation}"""
-         )
-
-         print("Final response generated. Process complete.")
-
-         # Combine all steps for the response
-         steps = {
-             "interpretation": interpretation,
-             "directCode": direct_path_result["directCode"],
-             "interpretedCode": interpreted_path_result["interpretedCode"],
-             "enhancedDirectCode": direct_path_result["enhancedDirectCode"],
-             "enhancedInterpretedCode": interpreted_path_result["enhancedInterpretedCode"],
-             "bestImplementation": best_implementation
-         }
-
-         return ComplexResponse(
-             code=final_response,
-             steps=steps
-         )
-     except Exception as error:
-         print("Error in complex query processing:", error)
-
-         # Fallback to simple query handling if something fails in the complex path
-         print("Falling back to simple query handling...")
-         return await handle_simple_query(query)
-
- async def call_openrouter(model: str, input: str):
-     try:
-         print(f"Calling OpenRouter with model: {model}")
-
-         async with httpx.AsyncClient(timeout=60.0) as client:
-             response = await client.post(
-                 "https://openrouter.ai/api/v1/chat/completions",
-                 json={
-                     "model": model,
-                     "messages": [{"role": "user", "content": input}],
-                     "temperature": 0.5,
-                     "max_tokens": 9999,
-                 },
-                 headers={
-                     "Authorization": f"Bearer {OPENROUTER_API_KEY}",
-                     "HTTP-Referer": "https://conv-code-generation-agent.vercel.app",
-                     "X-Title": "Code Generation Agent"
-                 },
-             )
-
-             response_data = response.json()
-             if response_data and "choices" in response_data and response_data["choices"]:
-                 content = response_data["choices"][0]["message"]["content"]
-
-                 # Log model response with clear separation
-                 print(f"\n========== RESPONSE FROM {model} ==========")
-                 print(content[:500] + ('...' if len(content) > 500 else ''))
-                 print('==========================================\n')
-
-                 return content
-             else:
-                 print("Unexpected response format:", response_data)
-                 return "Unexpected API response format"
-     except Exception as error:
-         error_message = getattr(error, "response", {}).get("data", str(error))
-         print(f"Error calling OpenRouter with model {model}:", error_message)
-         # Return error message instead of throwing
-         return f"Failed to generate with {model}. Error: {str(error)}"
-
- # For documentation and browser testing
- @app.get("/")
- async def root():
-     return {
-         "message": "Code Generation API",
-         "usage": "Use GET /generate?query=your programming question",
-         "example": "/generate?query=Write a Python function to reverse a string"
-     }
-
- # For running the app locally
- if __name__ == "__main__":
-     if not OPENROUTER_API_KEY:
-         print("ERROR: OPENROUTER_API_KEY must be set in .env file or environment variables")
-         print("Create a .env file with: OPENROUTER_API_KEY=your_key_here")
-         exit(1)
-     import uvicorn
-     uvicorn.run(app, host="0.0.0.0", port=8000)
+ from fastapi import FastAPI, HTTPException, Query
+ from fastapi.responses import FileResponse
+ import httpx
+ import os
+ import re
+ from typing import Dict
+ from pydantic import BaseModel
+ from dotenv import load_dotenv
+ import asyncio
+
+ # Load environment variables from .env file
+ load_dotenv()
+
+ app = FastAPI()
+ # Serve the favicon.ico file
+ @app.get("/favicon.ico", include_in_schema=False)
+ async def favicon():
+     # Ensure the favicon.ico file exists in the same directory as this script
+     favicon_path = os.path.join(os.path.dirname(__file__), "favicon.ico")
+     if os.path.exists(favicon_path):
+         return FileResponse(favicon_path)
+     else:
+         return {"error": "favicon.ico not found"}
+
+ # Define the models to use for each step with appropriate OpenRouter model IDs
+ MODELS = {
+     "classifier": "mistralai/mistral-7b-instruct:free",
+     "interpreter": "google/gemini-2.0-flash-thinking-exp:free",
+     "generator": "qwen/qwen2.5-vl-32b-instruct:free",
+     "instructTuned": "mistralai/mistral-7b-instruct:free",
+     "reviewer": "deepseek/deepseek-r1:free",
+ }
+
+ # Get API key from environment variables
+ OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")
+ if not OPENROUTER_API_KEY:
+     print("WARNING: OPENROUTER_API_KEY not found in environment variables!")
+
+ # Response models
+ class SimpleResponse(BaseModel):
+     code: str
+     steps: Dict[str, str]
+
+ class AmbiguousResponse(BaseModel):
+     code: str
+     feedback: str
+     ambiguous: bool = True
+
+ class ComplexResponse(BaseModel):
+     code: str
+     steps: Dict[str, str]
+
+ # Utility functions
+ async def call_openrouter(model: str, input: str) -> str:
+     """Reusable function to call OpenRouter API."""
+     try:
+         async with httpx.AsyncClient(timeout=60.0) as client:
+             response = await client.post(
+                 "https://openrouter.ai/api/v1/chat/completions",
+                 json={
+                     "model": model,
+                     "messages": [{"role": "user", "content": input}],
+                     "temperature": 0.5,
+                     "max_tokens": 9999,
+                 },
+                 headers={
+                     "Authorization": f"Bearer {OPENROUTER_API_KEY}",
+                     "HTTP-Referer": "https://conv-code-generation-agent.vercel.app",
+                     "X-Title": "Code Generation Agent",
+                 },
+             )
+             response_data = response.json()
+             if response_data and "choices" in response_data and response_data["choices"]:
+                 return response_data["choices"][0]["message"]["content"]
+             else:
+                 return "Unexpected API response format"
+     except Exception as error:
+         return f"Failed to generate with {model}. Error: {str(error)}"
+
+ async def enhance_code(query: str, code: str) -> str:
+     """Reusable function to enhance code."""
+     return await call_openrouter(
+         MODELS["instructTuned"],
+         f"""Improve this code for better performance and readability. Apply any specific customizations
+         mentioned in this request: "{query}"\n\nCode:\n{code}"""
+     )
+
+ # Prompts dictionary
+ PROMPTS = {
+     "classify_query": lambda query: f"""Analyze this coding request and classify it as "simple", "complex", or "ambiguous".
+     - Simple: Basic requests like "Write a Python function to reverse a string."
+     - Complex: Requests involving customization, optimization, or multiple features.
+     - Ambiguous: Unclear requests lacking specifics.
+     Respond with ONLY the word "simple", "complex", or "ambiguous".
+     Query: "{query}""",
+     "ambiguous_feedback": lambda query: f"""You are a coding assistant. This query is ambiguous or lacks specifics: "{query}".
+     Please provide helpful feedback on what details are needed to generate the code.""",
+     "generate_code": lambda query: f"""You are a programming assistant. Generate clean, well-commented, production-ready code for this request: "{query}".""",
+     "interpret_query": lambda query: f"""You are a programming assistant. Understand this coding request and convert it into a clear, detailed specification: "{query}".""",
+     "select_best": lambda impl1, impl2: f"""You are a code selection expert. Choose the better implementation that is more correct, efficient, and readable.
+     IMPLEMENTATION 1:\n{impl1}\n\n
+     IMPLEMENTATION 2:\n{impl2}\n\n
+     Respond with ONLY the complete selected implementation, no explanation needed.""",
+     "final_review": lambda code: f"""Review this code and ensure it meets industry best practices, security standards, and handles errors properly.
+     Add thorough comments explaining the key components and any optimizations you've made.
+     Provide the final, improved version:\n\n{code}""",
+ }
+
+ # Query handlers
+ async def handle_ambiguous_query(query: str) -> AmbiguousResponse:
+     feedback = await call_openrouter(MODELS["interpreter"], PROMPTS["ambiguous_feedback"](query))
+     return AmbiguousResponse(
+         code=f"{feedback}\n\nPlease provide more details so I can generate the appropriate code for you.",
+         feedback=feedback,
+     )
+
+ async def handle_simple_query(query: str) -> SimpleResponse:
+     code = await call_openrouter(MODELS["instructTuned"], PROMPTS["generate_code"](query))
+     return SimpleResponse(code=code, steps={"directCode": code})
+
+ async def handle_complex_query(query: str) -> ComplexResponse:
+     interpretation = await call_openrouter(MODELS["interpreter"], PROMPTS["interpret_query"](query))
+
+     if any(word in interpretation.lower() for word in ["unclear", "ambiguous", "need more information"]):
+         return await handle_ambiguous_query(query)
+
+     async def direct_code_path():
+         direct_code = await call_openrouter(MODELS["generator"], PROMPTS["generate_code"](query))
+         enhanced_direct_code = await enhance_code(query, direct_code)
+         return {"directCode": direct_code, "enhancedDirectCode": enhanced_direct_code}
+
+     async def interpreted_code_path():
+         interpreted_code = await call_openrouter(MODELS["generator"], PROMPTS["generate_code"](interpretation))
+         enhanced_interpreted_code = await enhance_code(query, interpreted_code)
+         return {"interpretedCode": interpreted_code, "enhancedInterpretedCode": enhanced_interpreted_code}
+
+     direct_path_result, interpreted_path_result = await asyncio.gather(
+         direct_code_path(),
+         interpreted_code_path(),
+     )
+
+     best_implementation = await call_openrouter(
+         MODELS["reviewer"],
+         PROMPTS["select_best"](direct_path_result["enhancedDirectCode"], interpreted_path_result["enhancedInterpretedCode"]),
+     )
+
+     final_response = await call_openrouter(MODELS["reviewer"], PROMPTS["final_review"](best_implementation))
+
+     steps = {
+         "interpretation": interpretation,
+         **direct_path_result,
+         **interpreted_path_result,
+         "bestImplementation": best_implementation,
+     }
+
+     return ComplexResponse(code=final_response, steps=steps)
+
+ # Main endpoint
+ @app.get("/generate")
+ async def generate_code(query: str = Query(..., description="The programming query to process")):
+     if not query:
+         raise HTTPException(status_code=400, detail="Query is required")
+
+     query_type = await call_openrouter(MODELS["classifier"], PROMPTS["classify_query"](query))
+     query_type = re.sub(r'[^a-z]', '', query_type.strip().lower())
+
+     if "ambiguous" in query_type:
+         return await handle_ambiguous_query(query)
+     elif "simple" in query_type:
+         return await handle_simple_query(query)
+     else:
+         return await handle_complex_query(query)
+
+ # Root endpoint for testing
+ @app.get("/")
+ async def root():
+     return {
+         "message": "Code Generation API",
+         "usage": "Use GET /generate?query=your programming question",
+         "example": "/generate?query=Write a Python function to reverse a string",
+     }
+
+ # For running the app locally
+ if __name__ == "__main__":
+     if not OPENROUTER_API_KEY:
+         print("ERROR: OPENROUTER_API_KEY must be set in .env file or environment variables")
+         print("Create a .env file with: OPENROUTER_API_KEY=your_key_here")
+         exit(1)
+     import uvicorn
+     uvicorn.run(app, host="0.0.0.0", port=8000)
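
For reference, a minimal sketch of how the refactored /generate endpoint could be exercised after this change. It is not part of the commit and assumes the app is running locally on port 8000, as in the __main__ block above.

# Hypothetical smoke-test client for app.py (not part of this commit).
# Assumes the server was started with `python app.py` on localhost:8000.
import asyncio

import httpx


async def main() -> None:
    async with httpx.AsyncClient(timeout=120.0) as client:
        # /generate classifies the query, then routes it to the simple,
        # complex, or ambiguous handler defined in app.py.
        response = await client.get(
            "http://localhost:8000/generate",
            params={"query": "Write a Python function to reverse a string"},
        )
        response.raise_for_status()
        data = response.json()
        # SimpleResponse and ComplexResponse carry "code" and "steps";
        # AmbiguousResponse carries "code", "feedback", and "ambiguous".
        print(data["code"])


if __name__ == "__main__":
    asyncio.run(main())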