Update app.py
app.py CHANGED
@@ -1,69 +1,78 @@
 import os
-from fastapi import FastAPI, HTTPException, Query
-from fastapi.responses import StreamingResponse
+from fastapi import FastAPI, HTTPException, Query, File, UploadFile, Form
+from fastapi.responses import StreamingResponse, JSONResponse
 from openai import AsyncOpenAI
+import base64
+from typing import Optional

 app = FastAPI()

-# Define available models (
+# Define available models (unchanged)
 AVAILABLE_MODELS = {
     "openai/gpt-4.1": "OpenAI GPT-4.1",
     "openai/gpt-4.1-mini": "OpenAI GPT-4.1-mini",
     "openai/gpt-4.1-nano": "OpenAI GPT-4.1-nano",
     "openai/gpt-4o": "OpenAI GPT-4o",
     "openai/gpt-4o-mini": "OpenAI GPT-4o mini",
     "openai/o4-mini": "OpenAI o4-mini",
     "microsoft/MAI-DS-R1": "MAI-DS-R1",
     "microsoft/Phi-3.5-MoE-instruct": "Phi-3.5-MoE instruct (128k)",
     "microsoft/Phi-3.5-mini-instruct": "Phi-3.5-mini instruct (128k)",
     "microsoft/Phi-3.5-vision-instruct": "Phi-3.5-vision instruct (128k)",
     "microsoft/Phi-3-medium-128k-instruct": "Phi-3-medium instruct (128k)",
     "microsoft/Phi-3-medium-4k-instruct": "Phi-3-medium instruct (4k)",
     "microsoft/Phi-3-mini-128k-instruct": "Phi-3-mini instruct (128k)",
     "microsoft/Phi-3-small-128k-instruct": "Phi-3-small instruct (128k)",
     "microsoft/Phi-3-small-8k-instruct": "Phi-3-small instruct (8k)",
     "microsoft/Phi-4": "Phi-4",
     "microsoft/Phi-4-mini-instruct": "Phi-4-mini-instruct",
     "microsoft/Phi-4-multimodal-instruct": "Phi-4-multimodal-instruct",
     "ai21-labs/AI21-Jamba-1.5-Large": "AI21 Jamba 1.5 Large",
     "ai21-labs/AI21-Jamba-1.5-Mini": "AI21 Jamba 1.5 Mini",
     "mistral-ai/Codestral-2501": "Codestral 25.01",
     "cohere/Cohere-command-r": "Cohere Command R",
     "cohere/Cohere-command-r-08-2024": "Cohere Command R 08-2024",
     "cohere/Cohere-command-r-plus": "Cohere Command R+",
     "cohere/Cohere-command-r-plus-08-2024": "Cohere Command R+ 08-2024",
     "deepseek/DeepSeek-R1": "DeepSeek-R1",
     "deepseek/DeepSeek-V3-0324": "DeepSeek-V3-0324",
     "meta/Llama-3.2-11B-Vision-Instruct": "Llama-3.2-11B-Vision-Instruct",
     "meta/Llama-3.2-90B-Vision-Instruct": "Llama-3.2-90B-Vision-Instruct",
     "meta/Llama-3.3-70B-Instruct": "Llama-3.3-70B-Instruct",
     "meta/Llama-4-Maverick-17B-128E-Instruct-FP8": "Llama 4 Maverick 17B 128E Instruct FP8",
     "meta/Llama-4-Scout-17B-16E-Instruct": "Llama 4 Scout 17B 16E Instruct",
     "meta/Meta-Llama-3.1-405B-Instruct": "Meta-Llama-3.1-405B-Instruct",
     "meta/Meta-Llama-3.1-70B-Instruct": "Meta-Llama-3.1-70B-Instruct",
     "meta/Meta-Llama-3.1-8B-Instruct": "Meta-Llama-3.1-8B-Instruct",
     "meta/Meta-Llama-3-70B-Instruct": "Meta-Llama-3-70B-Instruct",
     "meta/Meta-Llama-3-8B-Instruct": "Meta-Llama-3-8B-Instruct",
     "mistral-ai/Ministral-3B": "Ministral 3B",
     "mistral-ai/Mistral-Large-2411": "Mistral Large 24.11",
     "mistral-ai/Mistral-Nemo": "Mistral Nemo",
     "mistral-ai/Mistral-large-2407": "Mistral Large (2407)",
     "mistral-ai/Mistral-small": "Mistral Small",
     "cohere/cohere-command-a": "Cohere Command A",
     "core42/jais-30b-chat": "JAIS 30b Chat",
     "mistral-ai/mistral-small-2503": "Mistral Small 3.1"
 }

+# Vision-capable models (subset of AVAILABLE_MODELS)
+VISION_MODELS = [
+    "openai/gpt-4o",
+    "openai/gpt-4o-mini",
+    "microsoft/Phi-3.5-vision-instruct",
+    "meta/Llama-3.2-11B-Vision-Instruct",
+    "meta/Llama-3.2-90B-Vision-Instruct",
+    "microsoft/Phi-4-multimodal-instruct"
+]

 async def generate_ai_response(prompt: str, model: str):
-    # Configuration for unofficial GitHub AI endpoint
     token = os.getenv("GITHUB_TOKEN")
     if not token:
         raise HTTPException(status_code=500, detail="GitHub token not configured")

     endpoint = "https://models.github.ai/inference"

-    # Validate the model
     if model not in AVAILABLE_MODELS:
         raise HTTPException(status_code=400, detail=f"Model not available. Choose from: {', '.join(AVAILABLE_MODELS.keys())}")

@@ -72,7 +81,6 @@ async def generate_ai_response(prompt: str, model: str):
     try:
         stream = await client.chat.completions.create(
             messages=[
-                [one removed line; its content is not shown in the rendered diff]
                 {"role": "user", "content": prompt}
             ],
             model=model,
@@ -89,6 +97,47 @@
             yield f"Error: {str(err)}"
             raise HTTPException(status_code=500, detail="AI generation failed")

+async def process_image_with_vision(image: bytes, question: str, model: str):
+    token = os.getenv("GITHUB_TOKEN")
+    if not token:
+        raise HTTPException(status_code=500, detail="GitHub token not configured")
+
+    endpoint = "https://models.github.ai/inference"
+
+    if model not in VISION_MODELS:
+        raise HTTPException(status_code=400, detail=f"Model not vision-capable. Choose from: {', '.join(VISION_MODELS)}")
+
+    client = AsyncOpenAI(base_url=endpoint, api_key=token)
+
+    # Encode image to base64
+    base64_image = base64.b64encode(image).decode("utf-8")
+
+    try:
+        # Non-streaming request for vision task
+        response = await client.chat.completions.create(
+            messages=[
+                {
+                    "role": "user",
+                    "content": [
+                        {"type": "text", "text": question},
+                        {
+                            "type": "image_url",
+                            "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}
+                        }
+                    ]
+                }
+            ],
+            model=model,
+            temperature=1.0,
+            top_p=1.0,
+            stream=False  # Vision tasks typically don't stream
+        )
+
+        return response.choices[0].message.content
+
+    except Exception as err:
+        raise HTTPException(status_code=500, detail=f"Vision processing failed: {str(err)}")
+
 @app.post("/generate")
 async def generate_response(
     prompt: str = Query(..., description="The prompt for the AI"),
@@ -102,5 +151,23 @@ async def generate_response(
         media_type="text/event-stream"
     )

+@app.post("/process-image")
+async def process_image(
+    image: UploadFile = File(..., description="Image file (PNG, JPEG, GIF)"),
+    question: str = Form(..., description="Question about the image"),
+    model: str = Form("openai/gpt-4o", description="Vision-capable model")
+):
+    # Validate image format
+    if not image.filename.lower().endswith((".png", ".jpg", ".jpeg", ".gif")):
+        raise HTTPException(status_code=400, detail="Unsupported image format. Use PNG, JPEG, or GIF.")
+
+    # Read image content
+    image_data = await image.read()
+
+    # Process image with vision model
+    response = await process_image_with_vision(image_data, question, model)
+
+    return JSONResponse(content={"answer": response})
+
 def get_app():
     return app
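As a usage sketch (not part of the commit): the new POST /process-image endpoint takes a multipart form with an image file, a question, and a model field defaulting to openai/gpt-4o, and returns {"answer": ...}. The client below is hypothetical and assumes the app is served locally, e.g. via uvicorn app:app --port 8000, with GITHUB_TOKEN set in the server environment; photo.jpg is a placeholder filename.

import requests  # hypothetical client script; not part of app.py

with open("photo.jpg", "rb") as f:  # placeholder image file
    resp = requests.post(
        "http://localhost:8000/process-image",  # assumed local deployment
        files={"image": ("photo.jpg", f, "image/jpeg")},
        data={
            "question": "What is in this image?",
            "model": "openai/gpt-4o",  # must be one of VISION_MODELS
        },
        timeout=120,
    )

resp.raise_for_status()
print(resp.json()["answer"])  # the endpoint returns {"answer": ...}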
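The existing POST /generate endpoint streams its reply as a text/event-stream StreamingResponse. A minimal consumer might look like the sketch below, under the same local-server assumption. The model query parameter is an assumption here: the diff elides generate_response's full signature, but the handler clearly passes a model name through to generate_ai_response.

import requests  # hypothetical client script; not part of app.py

with requests.post(
    "http://localhost:8000/generate",  # assumed local deployment
    params={
        "prompt": "Write a haiku about FastAPI",
        "model": "openai/gpt-4.1-mini",  # assumed parameter; signature elided in the diff
    },
    stream=True,
    timeout=120,
) as resp:
    resp.raise_for_status()
    # The endpoint responds with media_type="text/event-stream";
    # print chunks as the server yields them.
    for chunk in resp.iter_content(chunk_size=None, decode_unicode=True):
        print(chunk, end="", flush=True)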