Update app.py
app.py CHANGED
@@ -1,23 +1,30 @@
 import os
+import logging
 from fastapi import FastAPI, HTTPException, Query
 from fastapi.responses import StreamingResponse
 from pydantic import BaseModel
 from openai import AsyncOpenAI
 from typing import Optional
 
+# Configure logging
+logging.basicConfig(level=logging.DEBUG)
+logger = logging.getLogger(__name__)
+
 app = FastAPI()
 
 class GenerateRequest(BaseModel):
     prompt: str
 
 async def generate_ai_response(prompt: str, model: str):
-
+    logger.debug(f"Received prompt: {prompt}, model: {model}")
     token = os.getenv("GITHUB_TOKEN")
-    endpoint = os.getenv("AI_SERVER_URL", "https://models.github.ai/inference")
+    endpoint = os.getenv("AI_SERVER_URL", "https://models.github.ai/inference")
 
     if not token:
+        logger.error("GitHub token not configured")
         raise HTTPException(status_code=500, detail="GitHub token not configured")
 
+    logger.debug(f"Using endpoint: {endpoint}")
     client = AsyncOpenAI(base_url=endpoint, api_key=token)
 
     try:
@@ -37,6 +44,7 @@ async def generate_ai_response(prompt: str, model: str):
             yield chunk.choices[0].delta.content
 
     except Exception as err:
+        logger.error(f"AI generation failed: {str(err)}")
         yield f"Error: {str(err)}"
         raise HTTPException(status_code=500, detail=f"AI generation failed: {str(err)}")
 
@@ -46,19 +54,15 @@ async def generate_response(
     prompt: Optional[str] = Query(None, description="The input text prompt for the AI"),
     request: Optional[GenerateRequest] = None
 ):
-    """
-    Generate a streaming AI response based on the provided prompt and model.
-
-    - **model**: The AI model to use (specified as a query parameter, defaults to default-model)
-    - **prompt**: The input text prompt for the AI (can be in query parameter or request body)
-    """
-    # Determine prompt source: query parameter or request body
+    logger.debug(f"Request received - model: {model}, prompt: {prompt}, body: {request}")
     final_prompt = prompt if prompt is not None else (request.prompt if request is not None else None)
 
     if not final_prompt or not final_prompt.strip():
+        logger.error("Prompt cannot be empty")
         raise HTTPException(status_code=400, detail="Prompt cannot be empty")
 
     if not model or not model.strip():
+        logger.error("Model cannot be empty")
         raise HTTPException(status_code=400, detail="Model cannot be empty")
 
     return StreamingResponse(
@@ -68,4 +72,3 @@ async def generate_response(
 
 def get_app():
     return app
-
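For context, here is a minimal sketch of how the endpoint might be exercised once this change is deployed. The route path ("/generate"), the HTTP method, and the model id below are assumptions for illustration: the route decorator and default model sit outside the diff context, and only the `model` and `prompt` query parameters are visible in the hunks.

import httpx

# Hypothetical client for the streaming endpoint above.
# ASSUMED: route path "/generate", method POST, and the model id used in
# __main__; none of these appear in the diff hunks.
def stream_generation(base_url: str, model: str, prompt: str) -> None:
    params = {"model": model, "prompt": prompt}
    # The prompt could equally be sent as a JSON body matching
    # GenerateRequest, i.e. json={"prompt": prompt}.
    with httpx.stream("POST", f"{base_url}/generate", params=params, timeout=60.0) as response:
        response.raise_for_status()
        # Consume the StreamingResponse incrementally, mirroring the
        # chunk.choices[0].delta.content pieces the server yields.
        for text in response.iter_text():
            print(text, end="", flush=True)

if __name__ == "__main__":
    stream_generation("http://localhost:8000", "openai/gpt-4o-mini", "Say hello")

One caveat when reading the except branch: once the StreamingResponse has started sending, the HTTPException raised inside the generator can no longer change the status code the client already received, so the `logger.error` call added in this commit is the more reliable failure signal on the server side.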