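"""FastAPI app that streams chat completions from the GitHub Models
OpenAI-compatible inference endpoint."""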
import os
from fastapi import FastAPI, HTTPException
from fastapi.responses import StreamingResponse
from openai import AsyncOpenAI
app = FastAPI()

async def generate_ai_response(prompt: str, model: str):
    # GitHub Models exposes an OpenAI-compatible inference endpoint,
    # so the standard OpenAI client works against it
    token = os.getenv("GITHUB_TOKEN")
    endpoint = "https://models.github.ai/inference"
    client = AsyncOpenAI(base_url=endpoint, api_key=token)
    try:
        stream = await client.chat.completions.create(
            messages=[
                {"role": "system", "content": "You are a helpful assistant named Orion and made by Abdullah Ali"},
                {"role": "user", "content": prompt}
            ],
            model=model,
            temperature=1.0,
            top_p=1.0,
            stream=True
        )
        async for chunk in stream:
            # Forward only content deltas; skip empty/keep-alive chunks
            if chunk.choices and chunk.choices[0].delta.content:
                yield chunk.choices[0].delta.content
    except Exception as err:
        # By the time this generator runs, the 200 status line has already
        # been sent, so raising HTTPException here could not surface as a
        # 500; report the failure in-band instead.
        yield f"Error: {err}"

@app.post("/generate")
async def generate_response(prompt: str, model: str):
    if not prompt:
        raise HTTPException(status_code=400, detail="Prompt cannot be empty")
    # Validate configuration before streaming begins; exceptions raised
    # inside the generator arrive too late to change the response status
    if not os.getenv("GITHUB_TOKEN"):
        raise HTTPException(status_code=500, detail="GitHub token not configured")
    return StreamingResponse(
        generate_ai_response(prompt, model),
        media_type="text/event-stream"
    )

def get_app():
    return app
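
# A minimal sketch of a local entry point, assuming uvicorn is installed
# and GITHUB_TOKEN is exported in the environment; port 7860 is an
# assumption (the Hugging Face Spaces default), not something this file
# requires.
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)

# Example request (the model id is illustrative; any id available on
# GitHub Models should work). prompt and model are query parameters:
#   curl -N -X POST "http://localhost:7860/generate?prompt=hello&model=openai/gpt-4o-mini"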