cstr committed
Commit 9918749 · verified · 1 parent: 25f51d0

Update app.py

Files changed (1):
app.py +48 -201
app.py CHANGED
@@ -1,76 +1,44 @@
 import os
-import base64
 import gradio as gr
 import requests
 import json
-from io import BytesIO
-from PIL import Image
 
-# Get API key from environment variable for security
+# API key
 OPENROUTER_API_KEY = os.environ.get("OPENROUTER_API_KEY", "")
 
-# Model list
-models = [
-    ("Google Gemini Pro 2.0", "google/gemini-2.0-pro-exp-02-05:free"),
-    ("Google Gemini 2.5 Pro", "google/gemini-2.5-pro-exp-03-25:free"),
-    ("Meta Llama 3.2 Vision", "meta-llama/llama-3.2-11b-vision-instruct:free"),
-    ("Qwen 2.5 VL", "qwen/qwen2.5-vl-72b-instruct:free"),
-    ("DeepSeek R1", "deepseek/deepseek-r1:free"),
-    ("Mistral 3.1", "mistralai/mistral-small-3.1-24b-instruct:free")
+# Basic model list
+MODELS = [
+    ("Gemini Pro 2.0", "google/gemini-2.0-pro-exp-02-05:free"),
+    ("Llama 3.2 Vision", "meta-llama/llama-3.2-11b-vision-instruct:free")
 ]
 
-def get_ai_response(message, history, model_name, image=None, file=None):
-    """Get response from AI"""
-    # Find model ID
-    model_id = next((mid for name, mid in models if name == model_name), models[0][1])
+def ask_ai(message, history, model_choice):
+    """Basic AI query function"""
+    # Get model ID
+    model_id = MODELS[0][1]  # Default
+    for name, model_id_value in MODELS:
+        if name == model_choice:
+            model_id = model_id_value
+            break
 
-    # Prepare messages
+    # Create messages from history
     messages = []
-    for human, ai in history:
-        messages.append({"role": "user", "content": human})
-        if ai:
-            messages.append({"role": "assistant", "content": ai})
+    for human_msg, ai_msg in history:
+        messages.append({"role": "user", "content": human_msg})
+        if ai_msg:
+            messages.append({"role": "assistant", "content": ai_msg})
 
-    # Handle file
-    if file is not None:
-        try:
-            with open(file.name, 'r', encoding='utf-8') as f:
-                file_content = f.read()
-            message = f"{message}\n\nFile content:\n```\n{file_content}\n```"
-        except Exception as e:
-            message = f"{message}\n\nError reading file: {str(e)}"
-
-    # Handle image
-    if image is not None:
-        try:
-            buffered = BytesIO()
-            image.save(buffered, format="JPEG")
-            base64_image = base64.b64encode(buffered.getvalue()).decode("utf-8")
-
-            content = [
-                {"type": "text", "text": message},
-                {
-                    "type": "image_url",
-                    "image_url": {
-                        "url": f"data:image/jpeg;base64,{base64_image}"
-                    }
-                }
-            ]
-            messages.append({"role": "user", "content": content})
-        except Exception as e:
-            messages.append({"role": "user", "content": message})
-            return f"Error processing image: {str(e)}"
-    else:
-        messages.append({"role": "user", "content": message})
+    # Add current message
+    messages.append({"role": "user", "content": message})
 
-    # API call
+    # Call API
    try:
        response = requests.post(
            "https://openrouter.ai/api/v1/chat/completions",
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {OPENROUTER_API_KEY}",
-                "HTTP-Referer": "https://huggingface.co/spaces",
+                "HTTP-Referer": "https://huggingface.co/spaces"
            },
            json={
                "model": model_id,
@@ -80,156 +48,35 @@ def get_ai_response(message, history, model_name, image=None, file=None):
            },
            timeout=60
        )
-        response.raise_for_status()
-
-        result = response.json()
-        return result.get("choices", [{}])[0].get("message", {}).get("content", "No response")
-    except Exception as e:
-        return f"Error: {str(e)}"
-
-def clear_inputs():
-    """Clear input fields"""
-    return "", None, None
-
-with gr.Blocks() as demo:
-    gr.Markdown("# 🔆 CrispChat")
-
-    chatbot = gr.Chatbot(
-        height=450,
-        type="messages"  # Use the new format as suggested in the warning
-    )
-
-    model_selector = gr.Dropdown(
-        choices=[name for name, _ in models],
-        value=models[0][0],
-        label="Model"
-    )
-
-    msg_input = gr.Textbox(
-        placeholder="Type your message here...",
-        lines=3,
-        label="Message"
-    )
-
-    img_input = gr.Image(
-        type="pil",
-        label="Image (optional)"
-    )
-
-    file_input = gr.File(
-        label="Text File (optional)"
-    )
-
-    with gr.Row():
-        submit_btn = gr.Button("Send")
-        clear_btn = gr.Button("Clear Chat")
-
-    # Define clear function
-    def clear_chat():
-        return []
-
-    # Submit function
-    def submit_message(message, chat_history, model, image, file):
-        if not message and not image and not file:
-            return chat_history, "", None, None
 
-        response = get_ai_response(message, chat_history, model, image, file)
-        chat_history.append((message, response))
-        return chat_history, "", None, None
-
-    # Set up events
-    submit_btn.click(
-        fn=submit_message,
-        inputs=[msg_input, chatbot, model_selector, img_input, file_input],
-        outputs=[chatbot, msg_input, img_input, file_input]
-    )
-
-    msg_input.submit(
-        fn=submit_message,
-        inputs=[msg_input, chatbot, model_selector, img_input, file_input],
-        outputs=[chatbot, msg_input, img_input, file_input]
-    )
-
-    clear_btn.click(
-        fn=clear_chat,
-        outputs=[chatbot]
-    )
-
-# FastAPI endpoint
-from fastapi import FastAPI
-from pydantic import BaseModel
-
-app = FastAPI()
-
-class GenerateRequest(BaseModel):
-    message: str
-    model: str = None
-    image_data: str = None
-
-@app.post("/api/generate")
-async def api_generate(request: GenerateRequest):
-    """API endpoint for text generation"""
-    try:
-        model_id = request.model or models[0][1]
-
-        # Prepare messages
-        messages = []
-
-        # Handle image
-        if request.image_data:
-            try:
-                # Decode base64 image
-                image_bytes = base64.b64decode(request.image_data)
-                image = Image.open(BytesIO(image_bytes))
-
-                # Re-encode
-                buffered = BytesIO()
-                image.save(buffered, format="JPEG")
-                base64_image = base64.b64encode(buffered.getvalue()).decode("utf-8")
-
-                content = [
-                    {"type": "text", "text": request.message},
-                    {
-                        "type": "image_url",
-                        "image_url": {
-                            "url": f"data:image/jpeg;base64,{base64_image}"
-                        }
-                    }
-                ]
-                messages.append({"role": "user", "content": content})
-            except Exception as e:
-                return {"error": f"Image processing error: {str(e)}"}
+        if response.status_code == 200:
+            result = response.json()
+            ai_response = result.get("choices", [{}])[0].get("message", {}).get("content", "")
+            history.append((message, ai_response))
        else:
-            messages.append({"role": "user", "content": request.message})
-
-        # API call
-        response = requests.post(
-            "https://openrouter.ai/api/v1/chat/completions",
-            headers={
-                "Content-Type": "application/json",
-                "Authorization": f"Bearer {OPENROUTER_API_KEY}",
-                "HTTP-Referer": "https://huggingface.co/spaces",
-            },
-            json={
-                "model": model_id,
-                "messages": messages,
-                "temperature": 0.7
-            },
-            timeout=60
-        )
-        response.raise_for_status()
-
-        result = response.json()
-        reply = result.get("choices", [{}])[0].get("message", {}).get("content", "No response")
-
-        return {"response": reply}
+            history.append((message, f"Error: Status code {response.status_code}"))
    except Exception as e:
-        return {"error": f"Error: {str(e)}"}
+        history.append((message, f"Error: {str(e)}"))
+
+    return history, ""
 
-# Mount Gradio app
-app = gr.mount_gradio_app(app, demo, path="/")
+# Create minimal interface
+demo = gr.Interface(
+    fn=ask_ai,
+    inputs=[
+        gr.Textbox(placeholder="Type your message here...", label="Message"),
+        gr.State([]),
+        gr.Radio([name for name, _ in MODELS], value=MODELS[0][0], label="Model")
+    ],
+    outputs=[
+        gr.Chatbot(label="Conversation", height=500, type="messages"),
+        gr.Textbox(visible=False)
+    ],
+    title="Simple AI Chat",
+    description="A minimalist chat interface for OpenRouter AI models",
+    allow_flagging="never"
+)
 
-# Launch
+# Launch directly with Gradio's built-in server
 if __name__ == "__main__":
-    import uvicorn
-    uvicorn.run(app, host="0.0.0.0", port=7860)
+    demo.launch(server_name="0.0.0.0", server_port=7860)
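For quick verification, a minimal smoke test of the simplified app is sketched below. It assumes app.py is importable from the working directory and that a real OpenRouter key is available; ask_ai and MODELS are the names from this commit, while the file name and placeholder key value are hypothetical.

# smoke_test.py (hypothetical): exercise the committed ask_ai() directly.
# Assumes app.py is on the import path; the key below is only a placeholder.
import os
os.environ.setdefault("OPENROUTER_API_KEY", "sk-or-...")  # set before importing app

from app import ask_ai, MODELS

# ask_ai returns (updated_history, ""); history entries are (user, reply) tuples
history, _ = ask_ai("Hello!", [], MODELS[0][0])
print(history[-1][1])  # the model's reply, or an "Error: ..." string

One caveat: ask_ai appends (user, reply) tuples to the history, while the output gr.Chatbot is declared with type="messages" and expects a list of {"role": ..., "content": ...} dicts, so the rendered conversation may not display as intended.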