# NOTE: Hugging Face Spaces page header ("Spaces: / Sleeping") removed — it was
# scraper residue, not source code, and made the file unparseable.
import os

from fastapi import FastAPI, HTTPException
from fastapi.responses import StreamingResponse, Response
from openai import AsyncOpenAI

app = FastAPI()

# GitHub token read once at import time; endpoint functions re-check it on
# each call, so a missing token fails at request time, not import time.
token = os.getenv("GITHUB_TOKEN")
async def generate_ai_response(prompt: str):
    """Stream chat-completion text for *prompt* from the GitHub Models endpoint.

    Yields:
        str: incremental content deltas; on upstream failure, a single
        ``"Error: ..."`` chunk describing the exception.

    Raises:
        HTTPException: 500 if GITHUB_TOKEN is not configured. Note this is
            raised on first iteration of the generator, i.e. after the caller
            may already have started the HTTP response.
    """
    global token
    if not token:
        raise HTTPException(status_code=500, detail="GitHub token not configured")

    endpoint = "https://models.github.ai/inference"
    model = "openai/gpt-4.1-mini"  # Unofficial model name

    client = AsyncOpenAI(base_url=endpoint, api_key=token)
    try:
        stream = await client.chat.completions.create(
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": prompt},
            ],
            model=model,
            temperature=1.0,
            top_p=1.0,
            stream=True,
        )
        async for chunk in stream:
            if chunk.choices and chunk.choices[0].delta.content:
                yield chunk.choices[0].delta.content
    except Exception as err:
        # Once we are streaming, the response headers have already been sent,
        # so raising HTTPException here (as the original did) could never
        # produce a 500 — it would only abort the connection mid-stream.
        # Surface the error as a final text chunk instead.
        yield f"Error: {str(err)}"
    finally:
        # The original leaked the underlying HTTP connection pool; always
        # close the client when the stream finishes or fails.
        await client.close()
class CustomStreamingResponse(Response):
    """ASGI response that streams chunks from an async iterator as SSE.

    Fixes over the original implementation:

    * ``Response.__init__`` cannot render an async generator (``render()``
      would try to encode it), so the iterator is stored as
      ``self.body_iterator`` and ``content=None`` is passed to the parent.
    * ``self.body_iterator`` was never assigned at all, so ``__call__``
      raised ``AttributeError`` on the first request.
    * SECURITY: the original echoed the GitHub token to every client in an
      ``x-token-value`` response header, leaking the secret. The token is
      still accepted and kept on the instance for caller compatibility, but
      it is no longer sent over the wire.
    """

    def __init__(self, content, token, media_type="text/event-stream", status_code=200):
        # content is an async iterator of str/bytes chunks; Response.render()
        # cannot handle it, so keep it aside and initialize with no body.
        super().__init__(content=None, media_type=media_type, status_code=status_code)
        self.body_iterator = content
        self.token = token  # retained for compatibility; never emitted in headers

    async def __call__(self, scope, receive, send):
        await send({
            "type": "http.response.start",
            "status": self.status_code,
            "headers": [
                (b"content-type", self.media_type.encode()),
                # SECURITY: deliberately NOT sending self.token here — it is
                # a secret and must not reach the client.
            ],
        })
        async for chunk in self.body_iterator:
            await send({
                "type": "http.response.body",
                "body": chunk.encode() if isinstance(chunk, str) else chunk,
                "more_body": True,
            })
        # Terminate the ASGI response body.
        await send({"type": "http.response.body", "body": b"", "more_body": False})
async def generate_response(prompt: str):
    """Validate *prompt* and wrap the AI stream in a streaming SSE response.

    Raises:
        HTTPException: 400 when the prompt is empty.
    """
    global token
    if not prompt:
        raise HTTPException(status_code=400, detail="Prompt cannot be empty")
    ai_stream = generate_ai_response(prompt)
    return CustomStreamingResponse(
        content=ai_stream,
        token=token,
        media_type="text/event-stream",
    )
async def get_token():
    """Return the configured GitHub token wrapped in a dict.

    NOTE(review): this hands back the raw GITHUB_TOKEN secret — if this
    function is ever exposed as a route, it leaks the credential to any
    caller. Confirm that is intentional.

    Raises:
        HTTPException: 500 when GITHUB_TOKEN is not configured.
    """
    global token
    if token:
        return {"token": token}
    raise HTTPException(status_code=500, detail="GitHub token not configured")
def get_app():
    """Expose the module-level FastAPI application (e.g. to an ASGI server)."""
    return app