Update app.py
app.py CHANGED
@@ -1,12 +1,13 @@
 import os
+import httpx
 from fastapi import FastAPI, HTTPException, Query
 from fastapi.responses import StreamingResponse
-from openai import AsyncOpenAI
 from collections import defaultdict
+from typing import AsyncGenerator

 app = FastAPI()

-#
+# Model list (unchanged)
 AVAILABLE_MODELS = {
     "openai/gpt-4.1": "OpenAI GPT-4.1",
     "openai/gpt-4.1-mini": "OpenAI GPT-4.1-mini",
@@ -55,53 +56,56 @@ AVAILABLE_MODELS = {
     "mistral-ai/mistral-small-2503": "Mistral Small 3.1"
 }

-# In-memory
+# In-memory history
 chat_histories = defaultdict(list)

-#
-async def generate_ai_response(chat_id: str, model: str):
+# Async generator for AI response
+async def generate_ai_response(chat_id: str, model: str) -> AsyncGenerator[str, None]:
     token = os.getenv("GITHUB_TOKEN")
     if not token:
         raise HTTPException(status_code=500, detail="GitHub token not configured")

-    endpoint = "https://models.github.ai/inference"
-
     if model not in AVAILABLE_MODELS:
         raise HTTPException(status_code=400, detail=f"Invalid model. Choose from: {', '.join(AVAILABLE_MODELS.keys())}")

-    client = AsyncOpenAI(base_url=endpoint, api_key=token)
-
-    response = await client.chat.completions.create(
-        model=model,
-        messages=chat_histories[chat_id],
-        temperature=1.0,
-        top_p=1.0,
-        stream=True
-    )
+    headers = {
+        "Authorization": f"Bearer {token}",
+        "Content-Type": "application/json"
+    }
+
+    payload = {
+        "model": model,
+        "messages": chat_histories[chat_id],
+        "stream": True,
+        "temperature": 1.0,
+        "top_p": 1.0
+    }

+    async with httpx.AsyncClient(timeout=60.0) as client:
+        try:
+            async with client.stream("POST", "https://models.github.ai/inference", headers=headers, json=payload) as response:
+                async for line in response.aiter_lines():
+                    if line.startswith("data:"):
+                        data = line[len("data:"):].strip()
+                        if data == "[DONE]":
+                            break
+                        if data:
+                            yield f"{data}\n"
+                            # Optionally: append to chat history
+                            chat_histories[chat_id].append({"role": "assistant", "content": data})
+        except Exception as e:
+            yield f"Error: {str(e)}"

-#
+# Generate response endpoint
 @app.post("/generate")
 async def generate_response(
     chat_id: str = Query(..., description="Chat session ID"),
     prompt: str = Query(..., description="User input message"),
     model: str = Query("openai/gpt-4.1-mini", description="Model to use")
 ):
     if not prompt:
         raise HTTPException(status_code=400, detail="Prompt cannot be empty")

-    # Add user message to history
     chat_histories[chat_id].append({"role": "user", "content": prompt})

     return StreamingResponse(
@@ -109,14 +113,13 @@ async def generate_response(
         media_type="text/event-stream"
     )

-#
+# Reset chat history endpoint
 @app.post("/reset")
 async def reset_chat(chat_id: str = Query(...)):
     if chat_id in chat_histories:
         chat_histories[chat_id].clear()
         return {"message": f"Chat {chat_id} history reset."}
-
     raise HTTPException(status_code=404, detail="Chat ID not found")

 def get_app():
     return app
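After this change, /generate forwards the upstream "data:" payloads as newline-separated JSON chunks instead of going through the openai client. A minimal consumer sketch, assuming the app is served locally (e.g. uvicorn app:app on http://localhost:8000) and that GitHub Models returns OpenAI-style chat-completion chunks with text under choices[0].delta.content; the chat_id, prompt, and URLs below are illustrative:

# client_example.py — hypothetical consumer of the /generate stream above.
# Assumes the app runs at http://localhost:8000; adjust as needed.
import asyncio
import json

import httpx


async def main() -> None:
    params = {
        "chat_id": "demo",                       # illustrative session key
        "prompt": "Say hello in one sentence.",
        "model": "openai/gpt-4.1-mini",
    }
    async with httpx.AsyncClient(timeout=60.0) as client:
        # The server yields one JSON chunk per line (the "data:" payloads
        # it forwards), so read the body line by line.
        async with client.stream(
            "POST", "http://localhost:8000/generate", params=params
        ) as response:
            async for line in response.aiter_lines():
                if not line:
                    continue
                try:
                    chunk = json.loads(line)
                except json.JSONDecodeError:
                    print(line)                  # e.g. the server's "Error: ..." fallback
                    continue
                # OpenAI-style chunks carry text in choices[0].delta.content.
                delta = (chunk.get("choices") or [{}])[0].get("delta", {})
                print(delta.get("content") or "", end="", flush=True)

        # Clear the session afterwards.
        reset = await client.post(
            "http://localhost:8000/reset", params={"chat_id": "demo"}
        )
        print("\n", reset.json())


asyncio.run(main())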
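One detail to keep in mind when adapting this commit: the generator appends every raw data string to chat_histories[chat_id] as its own assistant message, so the stored history holds JSON chunk fragments rather than assembled reply text. A sketch of one possible adjustment, not part of the commit: collect the delta text per chunk and append a single assistant message once "[DONE]" arrives. The collect_delta helper and reply_parts buffer here are hypothetical names.

# Hypothetical replacement for the per-chunk history append inside
# generate_ai_response — a sketch, not code from the commit above.
import json

reply_parts: list[str] = []          # collected delta text for one reply

def collect_delta(data: str) -> None:
    """Extract the text delta from one streamed chunk, if any."""
    chunk = json.loads(data)
    delta = (chunk.get("choices") or [{}])[0].get("delta", {})
    if delta.get("content"):
        reply_parts.append(delta["content"])

# After the stream ends ("[DONE]"), store one assembled message:
# chat_histories[chat_id].append(
#     {"role": "assistant", "content": "".join(reply_parts)}
# )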