abdullahalioo committed
Commit a980bb9 · verified · 1 parent: b685be0

Update app.py

Files changed (1): app.py (+124 −116)
app.py CHANGED
@@ -8,129 +8,137 @@ from collections import defaultdict
  app = FastAPI()

  # Define available models
- AVAILABLE_MODELS = {
-     "openai/gpt-4.1": "OpenAI GPT-4.1",
-     "openai/gpt-4.1-mini": "OpenAI GPT-4.1-mini",
-     "openai/gpt-4.1-nano": "OpenAI GPT-4.1-nano",
-     "openai/gpt-4o": "OpenAI GPT-4o",
-     "openai/gpt-4o-mini": "OpenAI GPT-4o mini",
-     "openai/o4-mini": "OpenAI o4-mini",
-     "microsoft/MAI-DS-R1": "MAI-DS-R1",
-     "microsoft/Phi-3.5-MoE-instruct": "Phi-3.5-MoE instruct (128k)",
-     "microsoft/Phi-3.5-mini-instruct": "Phi-3.5-mini instruct (128k)",
-     "microsoft/Phi-3.5-vision-instruct": "Phi-3.5-vision instruct (128k)",
-     "microsoft/Phi-3-medium-128k-instruct": "Phi-3-medium instruct (128k)",
-     "microsoft/Phi-3-medium-4k-instruct": "Phi-3-medium instruct (4k)",
-     "microsoft/Phi-3-mini-128k-instruct": "Phi-3-mini instruct (128k)",
-     "microsoft/Phi-3-small-128k-instruct": "Phi-3-small instruct (128k)",
-     "microsoft/Phi-3-small-8k-instruct": "Phi-3-small instruct (8k)",
-     "microsoft/Phi-4": "Phi-4",
-     "microsoft/Phi-4-mini-instruct": "Phi-4-mini-instruct",
-     "microsoft/Phi-4-multimodal-instruct": "Phi-4-multimodal-instruct",
-     "ai21-labs/AI21-Jamba-1.5-Large": "AI21 Jamba 1.5 Large",
-     "ai21-labs/AI21-Jamba-1.5-Mini": "AI21 Jamba 1.5 Mini",
-     "mistral-ai/Codestral-2501": "Codestral 25.01",
-     "cohere/Cohere-command-r": "Cohere Command R",
-     "cohere/Cohere-command-r-08-2024": "Cohere Command R 08-2024",
-     "cohere/Cohere-command-r-plus": "Cohere Command R+",
-     "cohere/Cohere-command-r-plus-08-2024": "Cohere Command R+ 08-2024",
-     "deepseek/DeepSeek-R1": "DeepSeek-R1",
-     "deepseek/DeepSeek-V3-0324": "DeepSeek-V3-0324",
-     "meta/Llama-3.2-11B-Vision-Instruct": "Llama-3.2-11B-Vision-Instruct",
-     "meta/Llama-3.2-90B-Vision-Instruct": "Llama-3.2-90B-Vision-Instruct",
-     "meta/Llama-3.3-70B-Instruct": "Llama-3.3-70B-Instruct",
-     "meta/Llama-4-Maverick-17B-128E-Instruct-FP8": "Llama 4 Maverick 17B 128E Instruct FP8",
-     "meta/Llama-4-Scout-17B-16E-Instruct": "Llama 4 Scout 17B 16E Instruct",
-     "meta/Meta-Llama-3.1-405B-Instruct": "Meta-Llama-3.1-405B-Instruct",
-     "meta/Meta-Llama-3.1-70B-Instruct": "Meta-Llama-3.1-70B-Instruct",
-     "meta/Meta-Llama-3.1-8B-Instruct": "Meta-Llama-3.1-8B-Instruct",
-     "meta/Meta-Llama-3-70B-Instruct": "Meta-Llama-3-70B-Instruct",
-     "meta/Meta-Llama-3-8B-Instruct": "Meta-Llama-3-8B-Instruct",
-     "mistral-ai/Ministral-3B": "Ministral 3B",
-     "mistral-ai/Mistral-Large-2411": "Mistral Large 24.11",
-     "mistral-ai/Mistral-Nemo": "Mistral Nemo",
-     "mistral-ai/Mistral-large-2407": "Mistral Large (2407)",
-     "mistral-ai/Mistral-small": "Mistral Small",
-     "cohere/cohere-command-a": "Cohere Command A",
-     "core42/jais-30b-chat": "JAIS 30b Chat",
-     "mistral-ai/mistral-small-2503": "Mistral Small 3.1"
  }

- # In-memory chat history and locks
- chat_histories = defaultdict(list)
- chat_locks = defaultdict(asyncio.Lock)
- MAX_HISTORY = 100
-
- # Streaming AI generation
- async def generate_ai_response(chat_id: str, model: str):
-     token = os.getenv("GITHUB_TOKEN")
-     if not token:
-         yield "Error: GitHub token not configured"
-         raise HTTPException(status_code=500, detail="GitHub token not configured")
-
-     if model not in AVAILABLE_MODELS:
-         yield f"Error: Invalid model {model}"
-         raise HTTPException(status_code=400, detail="Invalid model")
-
-     client = AsyncOpenAI(
-         base_url="https://models.github.ai/inference",
-         api_key=token
  )

-     try:
-         async with chat_locks[chat_id]:
-             stream = await asyncio.wait_for(
-                 client.chat.completions.create(
-                     messages=chat_histories[chat_id],
-                     model=model,
-                     temperature=1.0,
-                     top_p=1.0,
-                     stream=True
-                 ),
-                 timeout=60
-             )
-
-         async for chunk in stream:
-             if chunk.choices and chunk.choices[0].delta.content:
-                 content = chunk.choices[0].delta.content
-                 yield content
-                 async with chat_locks[chat_id]:
-                     chat_histories[chat_id].append({"role": "assistant", "content": content})
-                     chat_histories[chat_id] = chat_histories[chat_id][-MAX_HISTORY:]
-
-     except asyncio.TimeoutError:
-         yield "Error: Response timed out."
-         raise HTTPException(status_code=504, detail="Timeout")
-     except Exception as e:
-         yield f"Error: {str(e)}"
-         raise HTTPException(status_code=500, detail="AI generation failed")
-
- # POST /generate
  @app.post("/generate")
- async def generate_response(
-     chat_id: str = Query(..., description="Unique chat ID"),
-     prompt: str = Query(..., description="User prompt"),
-     model: str = Query("openai/gpt-4.1-mini", description="Model to use")
  ):
-     if not prompt.strip():
-         raise HTTPException(status_code=400, detail="Prompt is required")
-
-     async with chat_locks[chat_id]:
-         chat_histories[chat_id].append({"role": "user", "content": prompt})
-         chat_histories[chat_id] = chat_histories[chat_id][-MAX_HISTORY:]
-
-     return StreamingResponse(
-         generate_ai_response(chat_id, model),
-         media_type="text/event-stream"
-     )

- # POST /reset
  @app.post("/reset")
- async def reset_chat(chat_id: str = Query(..., description="Chat ID to reset")):
-     async with chat_locks[chat_id]:
-         chat_histories[chat_id].clear()
-     return {"message": f"Chat history for {chat_id} cleared."}
-
- # For ASGI hosting
- def get_app():
-     return app
  app = FastAPI()

  # Define available models
+ AVAILABLE_MODELS = {
+     "openai/gpt-4.1": "OpenAI GPT-4.1",
+     "openai/gpt-4.1-mini": "OpenAI GPT-4.1-mini",
+     "openai/gpt-4.1-nano": "OpenAI GPT-4.1-nano",
+     "openai/gpt-4o": "OpenAI GPT-4o",
+     "openai/gpt-4o-mini": "OpenAI GPT-4o mini",
+     "openai/o4-mini": "OpenAI o4-mini",
+     "microsoft/MAI-DS-R1": "MAI-DS-R1",
+     "microsoft/Phi-3.5-MoE-instruct": "Phi-3.5-MoE instruct (128k)",
+     "microsoft/Phi-3.5-mini-instruct": "Phi-3.5-mini instruct (128k)",
+     "microsoft/Phi-3.5-vision-instruct": "Phi-3.5-vision instruct (128k)",
+     "microsoft/Phi-3-medium-128k-instruct": "Phi-3-medium instruct (128k)",
+     "microsoft/Phi-3-medium-4k-instruct": "Phi-3-medium instruct (4k)",
+     "microsoft/Phi-3-mini-128k-instruct": "Phi-3-mini instruct (128k)",
+     "microsoft/Phi-3-small-128k-instruct": "Phi-3-small instruct (128k)",
+     "microsoft/Phi-3-small-8k-instruct": "Phi-3-small instruct (8k)",
+     "microsoft/Phi-4": "Phi-4",
+     "microsoft/Phi-4-mini-instruct": "Phi-4-mini-instruct",
+     "microsoft/Phi-4-multimodal-instruct": "Phi-4-multimodal-instruct",
+     "ai21-labs/AI21-Jamba-1.5-Large": "AI21 Jamba 1.5 Large",
+     "ai21-labs/AI21-Jamba-1.5-Mini": "AI21 Jamba 1.5 Mini",
+     "mistral-ai/Codestral-2501": "Codestral 25.01",
+     "cohere/Cohere-command-r": "Cohere Command R",
+     "cohere/Cohere-command-r-08-2024": "Cohere Command R 08-2024",
+     "cohere/Cohere-command-r-plus": "Cohere Command R+",
+     "cohere/Cohere-command-r-plus-08-2024": "Cohere Command R+ 08-2024",
+     "deepseek/DeepSeek-R1": "DeepSeek-R1",
+     "deepseek/DeepSeek-V3-0324": "DeepSeek-V3-0324",
+     "meta/Llama-3.2-11B-Vision-Instruct": "Llama-3.2-11B-Vision-Instruct",
+     "meta/Llama-3.2-90B-Vision-Instruct": "Llama-3.2-90B-Vision-Instruct",
+     "meta/Llama-3.3-70B-Instruct": "Llama-3.3-70B-Instruct",
+     "meta/Llama-4-Maverick-17B-128E-Instruct-FP8": "Llama 4 Maverick 17B 128E Instruct FP8",
+     "meta/Llama-4-Scout-17B-16E-Instruct": "Llama 4 Scout 17B 16E Instruct",
+     "meta/Meta-Llama-3.1-405B-Instruct": "Meta-Llama-3.1-405B-Instruct",
+     "meta/Meta-Llama-3.1-70B-Instruct": "Meta-Llama-3.1-70B-Instruct",
+     "meta/Meta-Llama-3.1-8B-Instruct": "Meta-Llama-3.1-8B-Instruct",
+     "meta/Meta-Llama-3-70B-Instruct": "Meta-Llama-3-70B-Instruct",
+     "meta/Meta-Llama-3-8B-Instruct": "Meta-Llama-3-8B-Instruct",
+     "mistral-ai/Ministral-3B": "Ministral 3B",
+     "mistral-ai/Mistral-Large-2411": "Mistral Large 24.11",
+     "mistral-ai/Mistral-Nemo": "Mistral Nemo",
+     "mistral-ai/Mistral-large-2407": "Mistral Large (2407)",
+     "mistral-ai/Mistral-small": "Mistral Small",
+     "cohere/cohere-command-a": "Cohere Command A",
+     "core42/jais-30b-chat": "JAIS 30b Chat",
+     "mistral-ai/mistral-small-2503": "Mistral Small 3.1"
  }

+ # Chat memory (in-memory)
+ chat_histories = defaultdict(list)
+ MAX_HISTORY = 100  # limit memory to avoid crashes
+
+ # Generate response stream
+ async def generate_ai_response(chat_id: str, model: str):
+     token = os.getenv("GITHUB_TOKEN")
+     if not token:
+         raise HTTPException(status_code=500, detail="GitHub token not configured")
+
+     endpoint = "https://models.github.ai/inference"
+
+     if model not in AVAILABLE_MODELS:
+         raise HTTPException(
+             status_code=400,
+             detail=f"Model not available. Choose from: {', '.join(AVAILABLE_MODELS.keys())}"
+         )
+
+     client = AsyncOpenAI(base_url=endpoint, api_key=token)
+
+     try:
+         stream = await asyncio.wait_for(
+             client.chat.completions.create(
+                 messages=chat_histories[chat_id],
+                 model=model,
+                 temperature=1.0,
+                 top_p=1.0,
+                 stream=True
+             ),
+             timeout=60  # Prevent hangs
  )

+         async for chunk in stream:
+             if chunk.choices and chunk.choices[0].delta.content:
+                 content = chunk.choices[0].delta.content
+                 yield content
+                 chat_histories[chat_id].append({"role": "assistant", "content": content})
+                 chat_histories[chat_id] = chat_histories[chat_id][-MAX_HISTORY:]
+
+     except asyncio.TimeoutError:
+         yield "Error: Response timed out."
+         raise HTTPException(status_code=504, detail="Model timed out.")
+     except Exception as err:
+         yield f"Error: {str(err)}"
+         raise HTTPException(status_code=500, detail="AI generation failed")
+
+ # Chat endpoint
  @app.post("/generate")
+ async def generate_response(
+     chat_id: str = Query(..., description="Unique chat ID"),
+     prompt: str = Query(..., description="User message"),
+     model: str = Query("openai/gpt-4.1-mini", description="Model to use")
  ):
+     if not prompt:
+         raise HTTPException(status_code=400, detail="Prompt cannot be empty")
+
+     chat_histories[chat_id].append({"role": "user", "content": prompt})
+     chat_histories[chat_id] = chat_histories[chat_id][-MAX_HISTORY:]
+
+     return StreamingResponse(
+         generate_ai_response(chat_id, model),
+         media_type="text/event-stream"
+     )
+
+ # Optional: reset chat history
  @app.post("/reset")
+ async def reset_chat(chat_id: str = Query(..., description="ID of chat to reset")):
+     if chat_id in chat_histories:
+         chat_histories[chat_id].clear()
+         return {"message": f"Chat {chat_id} history reset."}
+     else:
+         raise HTTPException(status_code=404, detail="Chat ID not found")
+
+ # For ASGI servers like Uvicorn
+ def get_app():
+     return app
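
For reference, a minimal client sketch showing how the streaming `/generate` endpoint added in this commit could be consumed. This is not part of the commit itself: it assumes the app is served locally (e.g. `uvicorn app:app --port 8000`, with `GITHUB_TOKEN` set in the environment) and uses `httpx` purely as an example client; any HTTP client that can stream a response body would work.

```python
# client_demo.py — hypothetical helper, not part of this commit.
import asyncio

import httpx

BASE_URL = "http://localhost:8000"  # assumes a local uvicorn instance


async def stream_reply(chat_id: str, prompt: str, model: str = "openai/gpt-4.1-mini") -> None:
    # The endpoint takes its inputs as query parameters (see the diff above).
    params = {"chat_id": chat_id, "prompt": prompt, "model": model}
    async with httpx.AsyncClient(timeout=None) as client:
        async with client.stream("POST", f"{BASE_URL}/generate", params=params) as response:
            response.raise_for_status()
            # Tokens arrive incrementally as plain-text chunks.
            async for chunk in response.aiter_text():
                print(chunk, end="", flush=True)
    print()


if __name__ == "__main__":
    asyncio.run(stream_reply("demo-chat", "Say hello in one sentence."))
```

Note that after this commit the history lives in process memory (`chat_histories` is a plain `defaultdict`), so conversations reset whenever the server restarts, and the `MAX_HISTORY` cap only bounds per-chat memory growth.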