abdullahalioo committed (verified)
Commit 256ed7f · 1 Parent(s): 465b43c

Update app.py

Files changed (1)
  1. app.py +76 -60
app.py CHANGED
@@ -2,68 +2,70 @@ import os
 from fastapi import FastAPI, HTTPException, Query
 from fastapi.responses import StreamingResponse
 from openai import AsyncOpenAI
+from collections import defaultdict
 
 app = FastAPI()
 
-# Define available models (you can expand this list)
+# Define available models
 AVAILABLE_MODELS = {
-    "openai/gpt-4.1": "OpenAI GPT-4.1",
-    "openai/gpt-4.1-mini": "OpenAI GPT-4.1-mini",
-    "openai/gpt-4.1-nano": "OpenAI GPT-4.1-nano",
-    "openai/gpt-4o": "OpenAI GPT-4o",
-    "openai/gpt-4o-mini": "OpenAI GPT-4o mini",
-    "openai/o4-mini": "OpenAI o4-mini",
-    "microsoft/MAI-DS-R1": "MAI-DS-R1",
-    "microsoft/Phi-3.5-MoE-instruct": "Phi-3.5-MoE instruct (128k)",
-    "microsoft/Phi-3.5-mini-instruct": "Phi-3.5-mini instruct (128k)",
-    "microsoft/Phi-3.5-vision-instruct": "Phi-3.5-vision instruct (128k)",
-    "microsoft/Phi-3-medium-128k-instruct": "Phi-3-medium instruct (128k)",
-    "microsoft/Phi-3-medium-4k-instruct": "Phi-3-medium instruct (4k)",
-    "microsoft/Phi-3-mini-128k-instruct": "Phi-3-mini instruct (128k)",
-    "microsoft/Phi-3-small-128k-instruct": "Phi-3-small instruct (128k)",
-    "microsoft/Phi-3-small-8k-instruct": "Phi-3-small instruct (8k)",
-    "microsoft/Phi-4": "Phi-4",
-    "microsoft/Phi-4-mini-instruct": "Phi-4-mini-instruct",
-    "microsoft/Phi-4-multimodal-instruct": "Phi-4-multimodal-instruct",
-    "ai21-labs/AI21-Jamba-1.5-Large": "AI21 Jamba 1.5 Large",
-    "ai21-labs/AI21-Jamba-1.5-Mini": "AI21 Jamba 1.5 Mini",
-    "mistral-ai/Codestral-2501": "Codestral 25.01",
-    "cohere/Cohere-command-r": "Cohere Command R",
-    "cohere/Cohere-command-r-08-2024": "Cohere Command R 08-2024",
-    "cohere/Cohere-command-r-plus": "Cohere Command R+",
-    "cohere/Cohere-command-r-plus-08-2024": "Cohere Command R+ 08-2024",
-    "deepseek/DeepSeek-R1": "DeepSeek-R1",
-    "deepseek/DeepSeek-V3-0324": "DeepSeek-V3-0324",
-    "meta/Llama-3.2-11B-Vision-Instruct": "Llama-3.2-11B-Vision-Instruct",
-    "meta/Llama-3.2-90B-Vision-Instruct": "Llama-3.2-90B-Vision-Instruct",
-    "meta/Llama-3.3-70B-Instruct": "Llama-3.3-70B-Instruct",
-    "meta/Llama-4-Maverick-17B-128E-Instruct-FP8": "Llama 4 Maverick 17B 128E Instruct FP8",
-    "meta/Llama-4-Scout-17B-16E-Instruct": "Llama 4 Scout 17B 16E Instruct",
-    "meta/Meta-Llama-3.1-405B-Instruct": "Meta-Llama-3.1-405B-Instruct",
-    "meta/Meta-Llama-3.1-70B-Instruct": "Meta-Llama-3.1-70B-Instruct",
-    "meta/Meta-Llama-3.1-8B-Instruct": "Meta-Llama-3.1-8B-Instruct",
-    "meta/Meta-Llama-3-70B-Instruct": "Meta-Llama-3-70B-Instruct",
-    "meta/Meta-Llama-3-8B-Instruct": "Meta-Llama-3-8B-Instruct",
-    "mistral-ai/Ministral-3B": "Ministral 3B",
-    "mistral-ai/Mistral-Large-2411": "Mistral Large 24.11",
-    "mistral-ai/Mistral-Nemo": "Mistral Nemo",
-    "mistral-ai/Mistral-large-2407": "Mistral Large (2407)",
-    "mistral-ai/Mistral-small": "Mistral Small",
-    "cohere/cohere-command-a": "Cohere Command A",
-    "core42/jais-30b-chat": "JAIS 30b Chat",
-    "mistral-ai/mistral-small-2503": "Mistral Small 3.1"
+    "openai/gpt-4.1": "OpenAI GPT-4.1",
+    "openai/gpt-4.1-mini": "OpenAI GPT-4.1-mini",
+    "openai/gpt-4.1-nano": "OpenAI GPT-4.1-nano",
+    "openai/gpt-4o": "OpenAI GPT-4o",
+    "openai/gpt-4o-mini": "OpenAI GPT-4o mini",
+    "openai/o4-mini": "OpenAI o4-mini",
+    "microsoft/MAI-DS-R1": "MAI-DS-R1",
+    "microsoft/Phi-3.5-MoE-instruct": "Phi-3.5-MoE instruct (128k)",
+    "microsoft/Phi-3.5-mini-instruct": "Phi-3.5-mini instruct (128k)",
+    "microsoft/Phi-3.5-vision-instruct": "Phi-3.5-vision instruct (128k)",
+    "microsoft/Phi-3-medium-128k-instruct": "Phi-3-medium instruct (128k)",
+    "microsoft/Phi-3-medium-4k-instruct": "Phi-3-medium instruct (4k)",
+    "microsoft/Phi-3-mini-128k-instruct": "Phi-3-mini instruct (128k)",
+    "microsoft/Phi-3-small-128k-instruct": "Phi-3-small instruct (128k)",
+    "microsoft/Phi-3-small-8k-instruct": "Phi-3-small instruct (8k)",
+    "microsoft/Phi-4": "Phi-4",
+    "microsoft/Phi-4-mini-instruct": "Phi-4-mini-instruct",
+    "microsoft/Phi-4-multimodal-instruct": "Phi-4-multimodal-instruct",
+    "ai21-labs/AI21-Jamba-1.5-Large": "AI21 Jamba 1.5 Large",
+    "ai21-labs/AI21-Jamba-1.5-Mini": "AI21 Jamba 1.5 Mini",
+    "mistral-ai/Codestral-2501": "Codestral 25.01",
+    "cohere/Cohere-command-r": "Cohere Command R",
+    "cohere/Cohere-command-r-08-2024": "Cohere Command R 08-2024",
+    "cohere/Cohere-command-r-plus": "Cohere Command R+",
+    "cohere/Cohere-command-r-plus-08-2024": "Cohere Command R+ 08-2024",
+    "deepseek/DeepSeek-R1": "DeepSeek-R1",
+    "deepseek/DeepSeek-V3-0324": "DeepSeek-V3-0324",
+    "meta/Llama-3.2-11B-Vision-Instruct": "Llama-3.2-11B-Vision-Instruct",
+    "meta/Llama-3.2-90B-Vision-Instruct": "Llama-3.2-90B-Vision-Instruct",
+    "meta/Llama-3.3-70B-Instruct": "Llama-3.3-70B-Instruct",
+    "meta/Llama-4-Maverick-17B-128E-Instruct-FP8": "Llama 4 Maverick 17B 128E Instruct FP8",
+    "meta/Llama-4-Scout-17B-16E-Instruct": "Llama 4 Scout 17B 16E Instruct",
+    "meta/Meta-Llama-3.1-405B-Instruct": "Meta-Llama-3.1-405B-Instruct",
+    "meta/Meta-Llama-3.1-70B-Instruct": "Meta-Llama-3.1-70B-Instruct",
+    "meta/Meta-Llama-3.1-8B-Instruct": "Meta-Llama-3.1-8B-Instruct",
+    "meta/Meta-Llama-3-70B-Instruct": "Meta-Llama-3-70B-Instruct",
+    "meta/Meta-Llama-3-8B-Instruct": "Meta-Llama-3-8B-Instruct",
+    "mistral-ai/Ministral-3B": "Ministral 3B",
+    "mistral-ai/Mistral-Large-2411": "Mistral Large 24.11",
+    "mistral-ai/Mistral-Nemo": "Mistral Nemo",
+    "mistral-ai/Mistral-large-2407": "Mistral Large (2407)",
+    "mistral-ai/Mistral-small": "Mistral Small",
+    "cohere/cohere-command-a": "Cohere Command A",
+    "core42/jais-30b-chat": "JAIS 30b Chat",
+    "mistral-ai/mistral-small-2503": "Mistral Small 3.1"
 }
 
+# In-memory chat history (chat_id: messages[])
+chat_histories = defaultdict(list)
 
-async def generate_ai_response(prompt: str, model: str):
-    # Configuration for unofficial GitHub AI endpoint
+# Function to generate response using chat history
+async def generate_ai_response(chat_id: str, model: str):
     token = os.getenv("GITHUB_TOKEN")
     if not token:
         raise HTTPException(status_code=500, detail="GitHub token not configured")
-
+
     endpoint = "https://models.github.ai/inference"
-
-    # Validate the model
+
     if model not in AVAILABLE_MODELS:
         raise HTTPException(status_code=400, detail=f"Model not available. Choose from: {', '.join(AVAILABLE_MODELS.keys())}")
 
@@ -71,10 +73,7 @@ async def generate_ai_response(prompt: str, model: str):
 
     try:
         stream = await client.chat.completions.create(
-            messages=[
-
-                {"role": "user", "content": prompt}
-            ],
+            messages=chat_histories[chat_id],  # full chat history
             model=model,
             temperature=1.0,
             top_p=1.0,
@@ -83,24 +82,41 @@ async def generate_ai_response(prompt: str, model: str):
 
         async for chunk in stream:
             if chunk.choices and chunk.choices[0].delta.content:
-                yield chunk.choices[0].delta.content
+                content = chunk.choices[0].delta.content
+                yield content
+                # Add assistant reply to history
+                chat_histories[chat_id].append({"role": "assistant", "content": content})
 
     except Exception as err:
         yield f"Error: {str(err)}"
         raise HTTPException(status_code=500, detail="AI generation failed")
 
+# Endpoint to generate a response with chat memory
 @app.post("/generate")
 async def generate_response(
-    prompt: str = Query(..., description="The prompt for the AI"),
+    chat_id: str = Query(..., description="Unique ID for the chat session"),
+    prompt: str = Query(..., description="The user message"),
     model: str = Query("openai/gpt-4.1-mini", description="The model to use for generation")
 ):
     if not prompt:
         raise HTTPException(status_code=400, detail="Prompt cannot be empty")
-
+
+    # Add user message to history
+    chat_histories[chat_id].append({"role": "user", "content": prompt})
+
     return StreamingResponse(
-        generate_ai_response(prompt, model),
+        generate_ai_response(chat_id, model),
         media_type="text/event-stream"
    )
 
+# Optional: endpoint to reset chat history
+@app.post("/reset")
+async def reset_chat(chat_id: str = Query(..., description="ID of chat to reset")):
+    if chat_id in chat_histories:
+        chat_histories[chat_id].clear()
+        return {"message": f"Chat {chat_id} history reset."}
+    else:
+        raise HTTPException(status_code=404, detail="Chat ID not found")
+
 def get_app():
-    return app
+    return app
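
A note on the new streaming loop above: it appends every streamed chunk to chat_histories[chat_id] as its own assistant message, so a single reply ends up stored as many one-chunk messages. A minimal sketch of the usual alternative, buffering the chunks and recording the reply once after the stream ends (stream_and_record is a hypothetical helper, not part of this commit):

# Hypothetical helper (not in this commit): buffer streamed chunks and store
# the full reply as a single assistant message once the stream completes.
async def stream_and_record(stream, history: list):
    parts = []
    async for chunk in stream:
        if chunk.choices and chunk.choices[0].delta.content:
            content = chunk.choices[0].delta.content
            parts.append(content)   # keep the chunk for the final history entry
            yield content           # still stream it to the client immediately
    # One history entry per reply, instead of one entry per chunk
    history.append({"role": "assistant", "content": "".join(parts)})

Inside generate_ai_response this would replace the loop with: async for piece in stream_and_record(stream, chat_histories[chat_id]): yield piece.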
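
To exercise the chat-memory flow end to end, a minimal client sketch (assumptions, not part of the commit: the app is served locally with uvicorn app:app on port 8000, GITHUB_TOKEN is set on the server, and the httpx package is installed):

# Minimal client for the /generate and /reset endpoints added above.
# Host, port, and the httpx dependency are assumptions for illustration.
import httpx

BASE = "http://localhost:8000"  # assumed local deployment

def chat(chat_id: str, prompt: str, model: str = "openai/gpt-4.1-mini") -> str:
    """Send one message and collect the streamed reply into a string."""
    params = {"chat_id": chat_id, "prompt": prompt, "model": model}
    reply = []
    with httpx.stream("POST", f"{BASE}/generate", params=params, timeout=60.0) as r:
        r.raise_for_status()
        for piece in r.iter_text():
            reply.append(piece)
    return "".join(reply)

print(chat("demo", "Hello!"))
print(chat("demo", "What did I just say?"))  # answered from the stored history
httpx.post(f"{BASE}/reset", params={"chat_id": "demo"})  # clear the session

Note that both endpoints take their arguments as query parameters (FastAPI's Query), so the values go in the URL rather than the request body.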