Update app.py
app.py CHANGED
@@ -1,25 +1,29 @@
 import os
-from fastapi import FastAPI, HTTPException
+from fastapi import FastAPI, HTTPException, Query
 from fastapi.responses import StreamingResponse
+from pydantic import BaseModel
 from openai import AsyncOpenAI
+from typing import Optional
 
 app = FastAPI()
 
-
-async def generate_ai_response(prompt: str , model: str):
+class GenerateRequest(BaseModel):
+    prompt: str
+
+async def generate_ai_response(prompt: str, model: str):
+    # Configuration for AI endpoint
     token = os.getenv("GITHUB_TOKEN")
+    endpoint = os.getenv("AI_SERVER_URL", "https://models.github.ai/inference")  # Default fallback
+
     if not token:
         raise HTTPException(status_code=500, detail="GitHub token not configured")
-
-    endpoint = "https://models.github.ai/inference"
-
 
     client = AsyncOpenAI(base_url=endpoint, api_key=token)
 
-    try
+    try:
         stream = await client.chat.completions.create(
             messages=[
-                {"role": "system", "content": "You are a helpful assistant named Orion"},
+                {"role": "system", "content": "You are a helpful assistant named Orion, created by Abdullah Ali"},
                 {"role": "user", "content": prompt}
             ],
             model=model,
@@ -34,17 +38,34 @@ async def generate_ai_response(prompt: str , model: str):
 
     except Exception as err:
         yield f"Error: {str(err)}"
-        raise HTTPException(status_code=500, detail="AI generation failed")
+        raise HTTPException(status_code=500, detail=f"AI generation failed: {str(err)}")
 
-@app.post("/generate")
-async def generate_response(prompt: str, model: str):
-    if not prompt:
+@app.post("/generate", summary="Generate AI response", response_description="Streaming AI response")
+async def generate_response(
+    model: str = Query("default-model", description="The AI model to use"),
+    prompt: Optional[str] = Query(None, description="The input text prompt for the AI"),
+    request: Optional[GenerateRequest] = None
+):
+    """
+    Generate a streaming AI response based on the provided prompt and model.
+
+    - **model**: The AI model to use (specified as a query parameter, defaults to default-model)
+    - **prompt**: The input text prompt for the AI (can be in query parameter or request body)
+    """
+    # Determine prompt source: query parameter or request body
+    final_prompt = prompt if prompt is not None else (request.prompt if request is not None else None)
+
+    if not final_prompt or not final_prompt.strip():
         raise HTTPException(status_code=400, detail="Prompt cannot be empty")
 
+    if not model or not model.strip():
+        raise HTTPException(status_code=400, detail="Model cannot be empty")
+
     return StreamingResponse(
-        generate_ai_response(prompt, model),
+        generate_ai_response(final_prompt, model),
         media_type="text/event-stream"
     )
 
 def get_app():
-    return app
+    return app
+
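The hunk range elides the middle of generate_ai_response (new lines 30-37), so the streaming loop itself is not shown. Below is a minimal, self-contained sketch of the usual AsyncOpenAI pattern for that section; stream=True, the chunk handling, and the sketch's function name are assumptions, not taken from the commit.

import os
from openai import AsyncOpenAI

async def generate_ai_response_sketch(prompt: str, model: str):
    # Same setup as the committed code above
    token = os.getenv("GITHUB_TOKEN")
    endpoint = os.getenv("AI_SERVER_URL", "https://models.github.ai/inference")
    client = AsyncOpenAI(base_url=endpoint, api_key=token)
    stream = await client.chat.completions.create(
        messages=[{"role": "user", "content": prompt}],
        model=model,
        stream=True,  # assumption: enables the async iteration below
    )
    async for chunk in stream:
        # Each chunk carries an incremental delta; skip empty keep-alive chunks
        if chunk.choices and chunk.choices[0].delta.content:
            yield chunk.choices[0].delta.content

Note that raising HTTPException after the generator has already yielded cannot change the HTTP status code; by that point the 200 response has started streaming, so the error text yielded to the client is what the caller actually sees.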
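With this change, /generate accepts the prompt either as a query parameter or in the JSON body (parsed into GenerateRequest). A hypothetical client sketch follows, assuming the Space is served locally on port 8000 and that "openai/gpt-4.1" is a valid model id for the configured endpoint; both values are illustrative, not from the commit.

import asyncio
import httpx

async def main() -> None:
    async with httpx.AsyncClient(base_url="http://localhost:8000") as client:
        # Option 1: prompt as a query parameter
        async with client.stream(
            "POST", "/generate",
            params={"prompt": "Hello, Orion!", "model": "openai/gpt-4.1"},
        ) as resp:
            async for text in resp.aiter_text():
                print(text, end="", flush=True)

        # Option 2: prompt in the JSON body (validated by the Pydantic model)
        async with client.stream(
            "POST", "/generate",
            params={"model": "openai/gpt-4.1"},
            json={"prompt": "Hello again!"},
        ) as resp:
            async for text in resp.aiter_text():
                print(text, end="", flush=True)

asyncio.run(main())

Supporting both input paths keeps existing query-string callers working while JSON clients gain request validation through the Pydantic model.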