Update app.py
app.py CHANGED
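The hunks below start at line 6 of app.py, so the import block at the top of the file is not shown. A plausible reconstruction, assuming nothing beyond the names actually used in the diff, would be:

# Presumed import block (lines 1-5 of app.py are not shown in the diff).
import os

from fastapi import FastAPI, HTTPException
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from openai import AsyncOpenAI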
@@ -6,16 +6,17 @@ from openai import AsyncOpenAI
 
 app = FastAPI()
 
+# Define a request model for the prompt and required model name
 class GenerateRequest(BaseModel):
     prompt: str
-    model: str  #
+    model: str  # Model is required, no default
 
 async def generate_ai_response(prompt: str, model: str):
+    # Configuration for unofficial GitHub AI endpoint
     token = os.getenv("GITHUB_TOKEN")
     if not token:
         raise HTTPException(status_code=500, detail="GitHub token not configured")
-
-    # You can also make this endpoint dynamic if needed
+
     endpoint = "https://models.github.ai/inference"
 
     client = AsyncOpenAI(base_url=endpoint, api_key=token)
@@ -26,7 +27,7 @@ async def generate_ai_response(prompt: str, model: str):
             {"role": "system", "content": "You are a helpful assistant."},
             {"role": "user", "content": prompt}
         ],
-        model=model,
+        model=model,
         temperature=1.0,
         top_p=1.0,
         stream=True
@@ -38,17 +39,20 @@ async def generate_ai_response(prompt: str, model: str):
 
     except Exception as err:
         yield f"Error: {str(err)}"
-        raise HTTPException(status_code=500, detail="AI generation failed")
+        raise HTTPException(status_code=500, detail=f"AI generation failed: {str(err)}")
 
 @app.post("/generate")
 async def generate_response(request: GenerateRequest):
     if not request.prompt:
         raise HTTPException(status_code=400, detail="Prompt cannot be empty")
-
+
+    if not request.model:
+        raise HTTPException(status_code=400, detail="Model must be specified")
+
     return StreamingResponse(
         generate_ai_response(request.prompt, request.model),
         media_type="text/event-stream"
     )
 
 def get_app():
-    return app
+    return app  # Fixed typo from previous code