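# FastAPI app that proxies GitHub Models (https://models.github.ai) chat
# completions, streaming tokens back to the client and keeping per-chat
# history in memory. A GITHUB_TOKEN environment variable is required.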
import os
import asyncio
from fastapi import FastAPI, HTTPException, Query
from fastapi.responses import StreamingResponse
from openai import AsyncOpenAI
from collections import defaultdict
app = FastAPI()
# Define available models
AVAILABLE_MODELS = {
"openai/gpt-4.1": "OpenAI GPT-4.1",
"openai/gpt-4.1-mini": "OpenAI GPT-4.1-mini",
"openai/gpt-4.1-nano": "OpenAI GPT-4.1-nano",
"openai/gpt-4o": "OpenAI GPT-4o",
"openai/gpt-4o-mini": "OpenAI GPT-4o mini",
"openai/o4-mini": "OpenAI o4-mini",
"microsoft/MAI-DS-R1": "MAI-DS-R1",
"microsoft/Phi-3.5-MoE-instruct": "Phi-3.5-MoE instruct (128k)",
"microsoft/Phi-3.5-mini-instruct": "Phi-3.5-mini instruct (128k)",
"microsoft/Phi-3.5-vision-instruct": "Phi-3.5-vision instruct (128k)",
"microsoft/Phi-3-medium-128k-instruct": "Phi-3-medium instruct (128k)",
"microsoft/Phi-3-medium-4k-instruct": "Phi-3-medium instruct (4k)",
"microsoft/Phi-3-mini-128k-instruct": "Phi-3-mini instruct (128k)",
"microsoft/Phi-3-small-128k-instruct": "Phi-3-small instruct (128k)",
"microsoft/Phi-3-small-8k-instruct": "Phi-3-small instruct (8k)",
"microsoft/Phi-4": "Phi-4",
"microsoft/Phi-4-mini-instruct": "Phi-4-mini-instruct",
"microsoft/Phi-4-multimodal-instruct": "Phi-4-multimodal-instruct",
"ai21-labs/AI21-Jamba-1.5-Large": "AI21 Jamba 1.5 Large",
"ai21-labs/AI21-Jamba-1.5-Mini": "AI21 Jamba 1.5 Mini",
"mistral-ai/Codestral-2501": "Codestral 25.01",
"cohere/Cohere-command-r": "Cohere Command R",
"cohere/Cohere-command-r-08-2024": "Cohere Command R 08-2024",
"cohere/Cohere-command-r-plus": "Cohere Command R+",
"cohere/Cohere-command-r-plus-08-2024": "Cohere Command R+ 08-2024",
"deepseek/DeepSeek-R1": "DeepSeek-R1",
"deepseek/DeepSeek-V3-0324": "DeepSeek-V3-0324",
"meta/Llama-3.2-11B-Vision-Instruct": "Llama-3.2-11B-Vision-Instruct",
"meta/Llama-3.2-90B-Vision-Instruct": "Llama-3.2-90B-Vision-Instruct",
"meta/Llama-3.3-70B-Instruct": "Llama-3.3-70B-Instruct",
"meta/Llama-4-Maverick-17B-128E-Instruct-FP8": "Llama 4 Maverick 17B 128E Instruct FP8",
"meta/Llama-4-Scout-17B-16E-Instruct": "Llama 4 Scout 17B 16E Instruct",
"meta/Meta-Llama-3.1-405B-Instruct": "Meta-Llama-3.1-405B-Instruct",
"meta/Meta-Llama-3.1-70B-Instruct": "Meta-Llama-3.1-70B-Instruct",
"meta/Meta-Llama-3.1-8B-Instruct": "Meta-Llama-3.1-8B-Instruct",
"meta/Meta-Llama-3-70B-Instruct": "Meta-Llama-3-70B-Instruct",
"meta/Meta-Llama-3-8B-Instruct": "Meta-Llama-3-8B-Instruct",
"mistral-ai/Ministral-3B": "Ministral 3B",
"mistral-ai/Mistral-Large-2411": "Mistral Large 24.11",
"mistral-ai/Mistral-Nemo": "Mistral Nemo",
"mistral-ai/Mistral-large-2407": "Mistral Large (2407)",
"mistral-ai/Mistral-small": "Mistral Small",
"cohere/cohere-command-a": "Cohere Command A",
"core42/jais-30b-chat": "JAIS 30b Chat",
"mistral-ai/mistral-small-2503": "Mistral Small 3.1"
}
# In-memory chat history and locks
chat_histories = defaultdict(list)
chat_locks = defaultdict(asyncio.Lock)
MAX_HISTORY = 100
# Streaming AI generation
async def generate_ai_response(chat_id: str, model: str):
    token = os.getenv("GITHUB_TOKEN")
    if not token:
        # By the time this generator runs, the streaming response has already
        # started, so errors are reported in-band rather than as HTTP statuses.
        yield "Error: GitHub token not configured"
        return
    if model not in AVAILABLE_MODELS:
        yield f"Error: Invalid model {model}"
        return
    client = AsyncOpenAI(
        base_url="https://models.github.ai/inference",
        api_key=token
    )
    try:
        # Snapshot the history under the lock, then stream without holding it
        # so one slow completion does not block other requests for this chat.
        async with chat_locks[chat_id]:
            messages = list(chat_histories[chat_id])
        stream = await asyncio.wait_for(
            client.chat.completions.create(
                messages=messages,
                model=model,
                temperature=1.0,
                top_p=1.0,
                stream=True
            ),
            timeout=60
        )
        # Forward each token to the client while accumulating the full reply.
        reply_parts = []
        async for chunk in stream:
            if chunk.choices and chunk.choices[0].delta.content:
                content = chunk.choices[0].delta.content
                reply_parts.append(content)
                yield content
        # Append the complete assistant message once, then trim the history.
        async with chat_locks[chat_id]:
            chat_histories[chat_id].append(
                {"role": "assistant", "content": "".join(reply_parts)}
            )
            chat_histories[chat_id] = chat_histories[chat_id][-MAX_HISTORY:]
    except asyncio.TimeoutError:
        yield "Error: Response timed out."
    except Exception as e:
        yield f"Error: {str(e)}"
    finally:
        await client.close()
# POST /generate
@app.post("/generate")
async def generate_response(
    chat_id: str = Query(..., description="Unique chat ID"),
    prompt: str = Query(..., description="User prompt"),
    model: str = Query("openai/gpt-4.1-mini", description="Model to use")
):
    if not prompt.strip():
        raise HTTPException(status_code=400, detail="Prompt is required")
    async with chat_locks[chat_id]:
        chat_histories[chat_id].append({"role": "user", "content": prompt})
        chat_histories[chat_id] = chat_histories[chat_id][-MAX_HISTORY:]
    return StreamingResponse(
        generate_ai_response(chat_id, model),
        media_type="text/event-stream"
    )
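# Usage sketch (illustrative, not part of the app): stream a reply from a
# locally running instance with httpx. The host/port and chat_id values are
# assumptions for local testing, not anything the Space defines.
#
#   import httpx
#   with httpx.stream(
#       "POST",
#       "http://localhost:7860/generate",
#       params={"chat_id": "demo", "prompt": "Hello", "model": "openai/gpt-4.1-mini"},
#       timeout=None,
#   ) as response:
#       for text in response.iter_text():
#           print(text, end="", flush=True)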
# POST /reset
@app.post("/reset")
async def reset_chat(chat_id: str = Query(..., description="Chat ID to reset")):
    async with chat_locks[chat_id]:
        chat_histories[chat_id].clear()
    return {"message": f"Chat history for {chat_id} cleared."}
# For ASGI hosting
def get_app():
    return app
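# Local entry point (a sketch, assuming uvicorn is installed; Hugging Face
# Spaces normally launches the app itself, and 7860 is only the conventional
# Spaces port).
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)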