# vision/app.py
import os

from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
from openai import AsyncOpenAI

app = FastAPI()
async def generate_ai_response(prompt: str):
    # Read the GitHub token from the environment; GitHub Models accepts it as the API key
    token = os.getenv("GITHUB_TOKEN")
    if not token:
        yield "Error: the GITHUB_TOKEN environment variable is not set."
        return

    endpoint = "https://models.github.ai/inference"
    model = "openai/gpt-4.1-mini"  # GitHub Models catalog ID uses the dotted "4.1" form

    # Initialize the async OpenAI client against the GitHub Models endpoint
    client = AsyncOpenAI(base_url=endpoint, api_key=token)
    try:
        # Create a streaming chat completion
        stream = await client.chat.completions.create(
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": prompt},
            ],
            temperature=1.0,
            top_p=1.0,
            model=model,
            stream=True,
        )
        # Forward each chunk's text to the caller as it arrives
        async for chunk in stream:
            if chunk.choices:
                content = chunk.choices[0].delta.content or ""
                if content:
                    yield content
    except Exception as err:
        yield f"Error while generating the response: {err}"
@app.post("/generate")
async def generate_response(request: Request):
    data = await request.json()
    prompt = data.get("prompt", "what is ai")  # fall back to a default prompt if none is provided
    return StreamingResponse(
        generate_ai_response(prompt),
        media_type="text/event-stream",
    )