Update app.py
app.py
CHANGED
@@ -2,14 +2,92 @@ import os
 import gradio as gr
 import requests
 import json
+import base64
+from PIL import Image
+import io
+import logging
+
+# Configure logging
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+logger = logging.getLogger(__name__)
 
 # API key
 OPENROUTER_API_KEY = os.environ.get("OPENROUTER_API_KEY", "")
 
-#
+# Model list with context sizes
 MODELS = [
-
-    ("Llama 3.2 Vision", "meta-llama/llama-3.2-11b-vision-instruct:free")
+    # Vision Models
+    ("Meta: Llama 3.2 11B Vision Instruct (free)", "meta-llama/llama-3.2-11b-vision-instruct:free", 131072),
+    ("Qwen: Qwen2.5 VL 72B Instruct (free)", "qwen/qwen2.5-vl-72b-instruct:free", 131072),
+    ("Qwen: Qwen2.5 VL 32B Instruct (free)", "qwen/qwen2.5-vl-32b-instruct:free", 8192),
+    ("Qwen: Qwen2.5 VL 7B Instruct (free)", "qwen/qwen-2.5-vl-7b-instruct:free", 64000),
+    ("Qwen: Qwen2.5 VL 3B Instruct (free)", "qwen/qwen2.5-vl-3b-instruct:free", 64000),
+
+    # Gemini Models
+    ("Google: Gemini Pro 2.0 Experimental (free)", "google/gemini-2.0-pro-exp-02-05:free", 2000000),
+    ("Google: Gemini Pro 2.5 Experimental (free)", "google/gemini-2.5-pro-exp-03-25:free", 1000000),
+    ("Google: Gemini 2.0 Flash Thinking Experimental 01-21 (free)", "google/gemini-2.0-flash-thinking-exp:free", 1048576),
+    ("Google: Gemini Flash 2.0 Experimental (free)", "google/gemini-2.0-flash-exp:free", 1048576),
+    ("Google: Gemini Flash 1.5 8B Experimental", "google/gemini-flash-1.5-8b-exp", 1000000),
+    ("Google: Gemini 2.0 Flash Thinking Experimental (free)", "google/gemini-2.0-flash-thinking-exp-1219:free", 40000),
+    ("Google: LearnLM 1.5 Pro Experimental (free)", "google/learnlm-1.5-pro-experimental:free", 40960),
+
+    # Llama Models
+    ("Meta: Llama 3.3 70B Instruct (free)", "meta-llama/llama-3.3-70b-instruct:free", 8000),
+    ("Meta: Llama 3.2 3B Instruct (free)", "meta-llama/llama-3.2-3b-instruct:free", 20000),
+    ("Meta: Llama 3.2 1B Instruct (free)", "meta-llama/llama-3.2-1b-instruct:free", 131072),
+    ("Meta: Llama 3.1 8B Instruct (free)", "meta-llama/llama-3.1-8b-instruct:free", 131072),
+    ("Meta: Llama 3 8B Instruct (free)", "meta-llama/llama-3-8b-instruct:free", 8192),
+    ("NVIDIA: Llama 3.1 Nemotron 70B Instruct (free)", "nvidia/llama-3.1-nemotron-70b-instruct:free", 131072),
+
+    # DeepSeek Models
+    ("DeepSeek: DeepSeek R1 Zero (free)", "deepseek/deepseek-r1-zero:free", 163840),
+    ("DeepSeek: R1 (free)", "deepseek/deepseek-r1:free", 163840),
+    ("DeepSeek: DeepSeek V3 Base (free)", "deepseek/deepseek-v3-base:free", 131072),
+    ("DeepSeek: DeepSeek V3 0324 (free)", "deepseek/deepseek-v3-0324:free", 131072),
+    ("DeepSeek: DeepSeek V3 (free)", "deepseek/deepseek-chat:free", 131072),
+    ("DeepSeek: R1 Distill Qwen 14B (free)", "deepseek/deepseek-r1-distill-qwen-14b:free", 64000),
+    ("DeepSeek: R1 Distill Qwen 32B (free)", "deepseek/deepseek-r1-distill-qwen-32b:free", 16000),
+    ("DeepSeek: R1 Distill Llama 70B (free)", "deepseek/deepseek-r1-distill-llama-70b:free", 8192),
+
+    # Gemma Models
+    ("Google: Gemma 3 27B (free)", "google/gemma-3-27b-it:free", 96000),
+    ("Google: Gemma 3 12B (free)", "google/gemma-3-12b-it:free", 131072),
+    ("Google: Gemma 3 4B (free)", "google/gemma-3-4b-it:free", 131072),
+    ("Google: Gemma 3 1B (free)", "google/gemma-3-1b-it:free", 32768),
+    ("Google: Gemma 2 9B (free)", "google/gemma-2-9b-it:free", 8192),
+
+    # Mistral Models
+    ("Mistral: Mistral Nemo (free)", "mistralai/mistral-nemo:free", 128000),
+    ("Mistral: Mistral Small 3.1 24B (free)", "mistralai/mistral-small-3.1-24b-instruct:free", 96000),
+    ("Mistral: Mistral Small 3 (free)", "mistralai/mistral-small-24b-instruct-2501:free", 32768),
+    ("Mistral: Mistral 7B Instruct (free)", "mistralai/mistral-7b-instruct:free", 8192),
+
+    # Qwen Models
+    ("Qwen: Qwen2.5 72B Instruct (free)", "qwen/qwen-2.5-72b-instruct:free", 32768),
+    ("Qwen: QwQ 32B (free)", "qwen/qwq-32b:free", 40000),
+    ("Qwen: QwQ 32B Preview (free)", "qwen/qwq-32b-preview:free", 16384),
+    ("Qwen2.5 Coder 32B Instruct (free)", "qwen/qwen-2.5-coder-32b-instruct:free", 32768),
+    ("Qwen 2 7B Instruct (free)", "qwen/qwen-2-7b-instruct:free", 8192),
+
+    # Other Models
+    ("Nous: DeepHermes 3 Llama 3 8B Preview (free)", "nousresearch/deephermes-3-llama-3-8b-preview:free", 131072),
+    ("Moonshot AI: Moonlight 16B A3B Instruct (free)", "moonshotai/moonlight-16b-a3b-instruct:free", 8192),
+    ("Microsoft: Phi-3 Mini 128K Instruct (free)", "microsoft/phi-3-mini-128k-instruct:free", 8192),
+    ("Microsoft: Phi-3 Medium 128K Instruct (free)", "microsoft/phi-3-medium-128k-instruct:free", 8192),
+    ("OpenChat 3.5 7B (free)", "openchat/openchat-7b:free", 8192),
+    ("Reka: Flash 3 (free)", "rekaai/reka-flash-3:free", 32768),
+    ("Dolphin3.0 R1 Mistral 24B (free)", "cognitivecomputations/dolphin3.0-r1-mistral-24b:free", 32768),
+    ("Dolphin3.0 Mistral 24B (free)", "cognitivecomputations/dolphin3.0-mistral-24b:free", 32768),
+    ("Bytedance: UI-TARS 72B (free)", "bytedance-research/ui-tars-72b:free", 32768),
+    ("Qwerky 72b (free)", "featherless/qwerky-72b:free", 32768),
+    ("OlympicCoder 7B (free)", "open-r1/olympiccoder-7b:free", 32768),
+    ("OlympicCoder 32B (free)", "open-r1/olympiccoder-32b:free", 32768),
+    ("Rogue Rose 103B v0.2 (free)", "sophosympatheia/rogue-rose-103b-v0.2:free", 4096),
+    ("Toppy M 7B (free)", "undi95/toppy-m-7b:free", 4096),
+    ("Hugging Face: Zephyr 7B (free)", "huggingfaceh4/zephyr-7b-beta:free", 4096),
+    ("MythoMax 13B (free)", "gryphe/mythomax-l2-13b:free", 4096),
+    ("AllenAI: Molmo 7B D (free)", "allenai/molmo-7b-d:free", 4096),
 ]
 
 def format_to_message_dict(history):
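Note on the list above: both `ask_ai` (in the next hunk) and the `update_context_info` helper added later in this diff scan MODELS linearly on every event. A dict keyed by display name would make the lookup constant-time; a minimal sketch (the names `MODEL_INFO` and `lookup_model` are illustrative, not part of this commit):

# Hypothetical helper, not in this commit: index MODELS once at import time.
MODEL_INFO = {name: (model_id, ctx_size) for name, model_id, ctx_size in MODELS}

def lookup_model(model_choice):
    """Return (model_id, context_size), or (None, 0) for an unknown name."""
    return MODEL_INFO.get(model_choice, (None, 0))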
@@ -24,26 +102,90 @@ def format_to_message_dict(history):
         messages.append({"role": "assistant", "content": ai})
     return messages
 
-def ask_ai(message, chatbot, model_choice):
-    """
-    if not message.strip():
+def encode_image_to_base64(image_path):
+    """Encode an image file to base64 string"""
+    try:
+        if isinstance(image_path, str):  # File path as string
+            with open(image_path, "rb") as image_file:
+                encoded_string = base64.b64encode(image_file.read()).decode('utf-8')
+            file_extension = image_path.split('.')[-1].lower()
+            mime_type = f"image/{file_extension}"
+            if file_extension == "jpg" or file_extension == "jpeg":
+                mime_type = "image/jpeg"
+            return f"data:{mime_type};base64,{encoded_string}"
+        else:  # Pillow Image or file-like object
+            buffered = io.BytesIO()
+            image_path.save(buffered, format="PNG")
+            encoded_string = base64.b64encode(buffered.getvalue()).decode('utf-8')
+            return f"data:image/png;base64,{encoded_string}"
+    except Exception as e:
+        logger.error(f"Error encoding image: {str(e)}")
+        return None
+
+def prepare_message_with_images(text, images):
+    """Prepare a message with text and images"""
+    if not images:
+        return text
+
+    content = [{"type": "text", "text": text}]
+
+    for img in images:
+        if img is None:
+            continue
+
+        encoded_image = encode_image_to_base64(img)
+        if encoded_image:
+            content.append({
+                "type": "image_url",
+                "image_url": {"url": encoded_image}
+            })
+
+    return content
+
+def ask_ai(message, chatbot, model_choice, temperature, max_tokens, uploaded_files):
+    """Enhanced AI query function with file upload support and detailed logging"""
+    if not message.strip() and not uploaded_files:
         return chatbot, ""
 
-    # Get model ID
-    model_id =
-    for name, model_id_value in MODELS:
+    # Get model ID and context size
+    model_id = None
+    context_size = 0
+    for name, model_id_value, ctx_size in MODELS:
         if name == model_choice:
             model_id = model_id_value
+            context_size = ctx_size
             break
 
+    if model_id is None:
+        logger.error(f"Model not found: {model_choice}")
+        return chatbot + [[message, "Error: Model not found"]], ""
+
     # Create messages from chatbot history
     messages = format_to_message_dict(chatbot)
 
+    # Prepare message with images if any
+    if uploaded_files:
+        content = prepare_message_with_images(message, uploaded_files)
+    else:
+        content = message
+
     # Add current message
-    messages.append({"role": "user", "content": message})
+    messages.append({"role": "user", "content": content})
 
     # Call API
     try:
+        logger.info(f"Sending request to model: {model_id}")
+        logger.info(f"Messages: {json.dumps(messages)}")
+
+        payload = {
+            "model": model_id,
+            "messages": messages,
+            "temperature": temperature,
+            "max_tokens": max_tokens
+        }
+
+        logger.info(f"Request payload: {json.dumps(payload)}")
+
         response = requests.post(
             "https://openrouter.ai/api/v1/chat/completions",
             headers={
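For reference, `prepare_message_with_images` above returns either the plain text or an OpenAI-style multimodal content array, which is the format OpenRouter's chat completions endpoint accepts for vision models. A user turn with one attached image therefore ends up shaped roughly like this (base64 payload abbreviated):

{
    "role": "user",
    "content": [
        {"type": "text", "text": "What is in this image?"},
        {"type": "image_url", "image_url": {"url": "data:image/png;base64,iVBORw0KGgo..."}}
    ]
}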
@@ -51,70 +193,151 @@ def ask_ai(message, chatbot, model_choice):
                 "Authorization": f"Bearer {OPENROUTER_API_KEY}",
                 "HTTP-Referer": "https://huggingface.co/spaces"
             },
-            json={
-                "model": model_id,
-                "messages": messages,
-                "temperature": 0.7,
-                "max_tokens": 1000
-            },
+            json=payload,
             timeout=60
         )
 
+        logger.info(f"Response status: {response.status_code}")
+        logger.info(f"Response headers: {response.headers}")
+
+        response_text = response.text
+        logger.info(f"Response body: {response_text}")
+
         if response.status_code == 200:
             result = response.json()
             ai_response = result.get("choices", [{}])[0].get("message", {}).get("content", "")
             chatbot = chatbot + [[message, ai_response]]
+
+            # Log token usage if available
+            if "usage" in result:
+                logger.info(f"Token usage: {result['usage']}")
         else:
-
+            error_message = f"Error: Status code {response.status_code}\n\nResponse: {response_text}"
+            chatbot = chatbot + [[message, error_message]]
     except Exception as e:
+        logger.error(f"Exception during API call: {str(e)}")
         chatbot = chatbot + [[message, f"Error: {str(e)}"]]
 
     return chatbot, ""
 
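Since the request and response are now fully logged, the same call is easy to replay outside Gradio when debugging. A minimal standalone sketch using only values that already appear in this diff (assumes OPENROUTER_API_KEY is set in the environment):

import os
import requests

payload = {
    "model": "meta-llama/llama-3.2-11b-vision-instruct:free",  # any ID from MODELS
    "messages": [{"role": "user", "content": "Hello"}],
    "temperature": 0.7,
    "max_tokens": 1000,
}
response = requests.post(
    "https://openrouter.ai/api/v1/chat/completions",
    headers={
        "Authorization": f"Bearer {os.environ['OPENROUTER_API_KEY']}",
        "HTTP-Referer": "https://huggingface.co/spaces",
    },
    json=payload,
    timeout=60,
)
print(response.status_code)
print(response.text)  # the same body ask_ai logs and surfaces on errors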
 def clear_chat():
-    return [], ""
+    return [], "", [], 0.7, 1000
 
-# Create
-with gr.Blocks() as demo:
-    gr.Markdown("
-
-    chatbot = gr.Chatbot(height=400)
+# Create enhanced interface
+with gr.Blocks(css="footer {visibility: hidden}") as demo:
+    gr.Markdown("""
+    # Enhanced AI Chat
+
+    This interface allows you to chat with various free AI models from OpenRouter.
+    You can upload images for vision-capable models and adjust parameters.
+    """)
 
-    with gr.Row():
-
-        message = gr.Textbox(
-            label="Message",
-            lines=2
-        )
 
     with gr.Row():
-
-
-
-
-
-
-
-
+        with gr.Column(scale=2):
+            chatbot = gr.Chatbot(height=500, show_copy_button=True, show_label=False)
+
+            with gr.Row():
+                message = gr.Textbox(
+                    placeholder="Type your message here...",
+                    label="Message",
+                    lines=2
+                )
+
+            with gr.Row():
+                with gr.Column(scale=3):
+                    submit_btn = gr.Button("Send", variant="primary")
+
+                with gr.Column(scale=1):
+                    clear_btn = gr.Button("Clear Chat", variant="secondary")
+
+            with gr.Row():
+                uploaded_files = gr.Gallery(
+                    label="Uploaded Images",
+                    show_label=True,
+                    elem_id="gallery",
+                    columns=4,
+                    height=150,
+                    visible=False
+                )
+
+            with gr.Row():
+                upload_btn = gr.UploadButton(
+                    label="Upload Images (for vision models)",
+                    file_types=["image"],
+                    file_count="multiple"
+                )
 
+        with gr.Column(scale=1):
+            with gr.Group():
+                gr.Markdown("### Model Selection")
+                model_names = [name for name, _, _ in MODELS]
+                model_choice = gr.Radio(
+                    model_names,
+                    value=model_names[0],
+                    label="Choose a Model"
+                )
+
+            with gr.Accordion("Model Context", open=False):
+                context_info = gr.HTML(value="<p>Select a model to see its context window</p>")
+
+            with gr.Accordion("Parameters", open=False):
+                temperature = gr.Slider(
+                    minimum=0.1,
+                    maximum=2.0,
+                    value=0.7,
+                    step=0.1,
+                    label="Temperature"
+                )
+
+                max_tokens = gr.Slider(
+                    minimum=100,
+                    maximum=4000,
+                    value=1000,
+                    step=100,
+                    label="Max Tokens"
+                )
+
+    # Set up context window display
+    def update_context_info(model_name):
+        for name, _, ctx_size in MODELS:
+            if name == model_name:
+                return f"<p><b>Context window:</b> {ctx_size:,} tokens</p>"
+        return "<p>Model information not found</p>"
+
+    model_choice.change(
+        fn=update_context_info,
+        inputs=[model_choice],
+        outputs=[context_info]
+    )
+
+    # Process uploaded files
+    def process_uploaded_files(files):
+        file_paths = [file.name for file in files]
+        return file_paths, gr.update(visible=True)
+
+    upload_btn.upload(
+        fn=process_uploaded_files,
+        inputs=[upload_btn],
+        outputs=[uploaded_files, uploaded_files]
+    )
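One caveat on the upload wiring above: `outputs=[uploaded_files, uploaded_files]` lists the same component twice, and depending on the Gradio version a `gr.Gallery` used as an input may hand `ask_ai` a list of `(image, caption)` tuples rather than bare file paths, which `encode_image_to_base64` does not unpack. A defensive sketch (the `normalize_gallery_items` helper is hypothetical, not in this commit):

# Hypothetical helper, not in this commit: tolerate both bare paths and
# (image, caption) tuples before passing items to encode_image_to_base64.
def normalize_gallery_items(items):
    normalized = []
    for item in items or []:
        if isinstance(item, (tuple, list)):  # (image, caption) style value
            item = item[0]
        if item is not None:
            normalized.append(item)
    return normalized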
 
     # Set up events
     submit_btn.click(
         fn=ask_ai,
-        inputs=[message, chatbot, model_choice],
+        inputs=[message, chatbot, model_choice, temperature, max_tokens, uploaded_files],
         outputs=[chatbot, message]
     )
 
     message.submit(
         fn=ask_ai,
-        inputs=[message, chatbot, model_choice],
+        inputs=[message, chatbot, model_choice, temperature, max_tokens, uploaded_files],
         outputs=[chatbot, message]
     )
 
     clear_btn.click(
         fn=clear_chat,
         inputs=[],
-        outputs=[chatbot, message]
+        outputs=[chatbot, message, uploaded_files, temperature, max_tokens]
     )
 
     # Launch directly with Gradio's built-in server
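The diff ends at the comment above, so the launch call itself sits outside the changed context and is not shown. On a Space it is presumably something close to this minimal sketch (the arguments are an assumption, not part of this commit):

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)  # assumed defaults for a Space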