Update app.py
app.py
CHANGED
@@ -71,8 +71,13 @@ OPENROUTER_API_KEY = os.environ.get("OPENROUTER_API_KEY", "")
 OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "")
 GROQ_API_KEY = os.environ.get("GROQ_API_KEY", "")
 COHERE_API_KEY = os.environ.get("COHERE_API_KEY", "")
-GLHF_API_KEY = os.environ.get("GLHF_API_KEY", "")
 HF_API_KEY = os.environ.get("HF_API_KEY", "")
+TOGETHER_API_KEY = os.environ.get("TOGETHER_API_KEY", "")
+GOOGLEAI_API_KEY = os.environ.get("GOOGLEAI_API_KEY", "")
+
+# Print application startup message with timestamp
+current_time = time.strftime("%Y-%m-%d %H:%M:%S")
+print(f"===== Application Startup at {current_time} =====\n")
 
 # ==========================================================
 # MODEL DEFINITIONS
@@ -83,7 +88,7 @@ HF_API_KEY = os.environ.get("HF_API_KEY", "")
 OPENROUTER_MODELS = [
     # 1M+ Context Models
     {"category": "1M+ Context", "models": [
-…
+        ("Google: Gemini Pro 2.0 Experimental", "google/gemini-2.0-pro-exp-02-05:free", 2000000),
         ("Google: Gemini 2.0 Flash Thinking Experimental 01-21", "google/gemini-2.0-flash-thinking-exp:free", 1048576),
         ("Google: Gemini Flash 2.0 Experimental", "google/gemini-2.0-flash-exp:free", 1048576),
         ("Google: Gemini Pro 2.5 Experimental", "google/gemini-2.5-pro-exp-03-25:free", 1000000),
@@ -164,7 +169,7 @@ OPENROUTER_MODELS = [
 
     # Vision-capable Models
     {"category": "Vision Models", "models": [
-…
+        ("Google: Gemini Pro 2.0 Experimental", "google/gemini-2.0-pro-exp-02-05:free", 2000000),
         ("Google: Gemini 2.0 Flash Thinking Experimental 01-21", "google/gemini-2.0-flash-thinking-exp:free", 1048576),
         ("Google: Gemini Flash 2.0 Experimental", "google/gemini-2.0-flash-exp:free", 1048576),
         ("Google: Gemini Pro 2.5 Experimental", "google/gemini-2.5-pro-exp-03-25:free", 1000000),
@@ -193,6 +198,31 @@ for category in OPENROUTER_MODELS:
         if model not in OPENROUTER_ALL_MODELS:  # Avoid duplicates
             OPENROUTER_ALL_MODELS.append(model)
 
+# VISION MODELS - For tracking which models support images
+VISION_MODELS = {
+    "OpenRouter": [model[0] for model in OPENROUTER_MODELS[-1]["models"]],  # Last category is Vision Models
+    "OpenAI": [
+        "gpt-4-vision-preview", "gpt-4o", "gpt-4o-mini", "gpt-4-turbo",
+        "gpt-4-turbo-preview", "gpt-4-0125-preview", "gpt-4-1106-preview",
+        "o1-preview", "o1-mini"
+    ],
+    "HuggingFace": [
+        "Qwen/Qwen2.5-VL-7B-Instruct", "Qwen/qwen2.5-vl-3b-instruct",
+        "Qwen/qwen2.5-vl-32b-instruct", "Qwen/qwen2.5-vl-72b-instruct"
+    ],
+    "Groq": ["llama-3.2-11b-vision", "llama-3.2-90b-vision"],
+    "Together": ["Llama-3.2-11B-Vision-Instruct", "Llama-3.2-90B-Vision-Instruct"],
+    "OVH": ["llava-next-mistral-7b", "qwen2.5-vl-72b-instruct"],
+    "Cerebras": [],
+    "GoogleAI": ["gemini-1.5-pro", "gemini-1.0-pro", "gemini-1.5-flash", "gemini-2.0-pro", "gemini-2.5-pro"]
+}
+
+# Add all models with "vl", "vision", "visual" in their name to HF vision models
+for model_name in list(HUGGINGFACE_MODELS.keys()):
+    if any(x in model_name.lower() for x in ["vl", "vision", "visual", "llava"]):
+        if model_name not in VISION_MODELS["HuggingFace"]:
+            VISION_MODELS["HuggingFace"].append(model_name)
+
 # OPENAI MODELS
 OPENAI_MODELS = {
     "gpt-3.5-turbo": 16385,
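A note on the VISION_MODELS registry added above: its OpenRouter entry is derived positionally from OPENROUTER_MODELS[-1], so it silently breaks if a category is ever appended after "Vision Models". A name-based lookup is sturdier; a minimal sketch under the {"category", "models"} shape shown above (vision_models_for is an illustrative helper, not part of app.py):

    def vision_models_for(openrouter_models):
        # Find the vision category by name instead of by position.
        for cat in openrouter_models:
            if cat["category"] == "Vision Models":
                return [name for (name, _model_id, _ctx) in cat["models"]]
        return []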
@@ -211,13 +241,8 @@ OPENAI_MODELS = {
     "gpt-4o-2024-11-20": 128000,
     "gpt-4o-2024-08-06": 128000,
     "gpt-4o-2024-05-13": 128000,
-    "chatgpt-4o-latest": 128000,
     "gpt-4o-mini": 128000,
     "gpt-4o-mini-2024-07-18": 128000,
-    "gpt-4o-realtime-preview": 128000,
-    "gpt-4o-realtime-preview-2024-10-01": 128000,
-    "gpt-4o-audio-preview": 128000,
-    "gpt-4o-audio-preview-2024-10-01": 128000,
     "o1-preview": 128000,
     "o1-preview-2024-09-12": 128000,
     "o1-mini": 128000,
@@ -233,7 +258,6 @@ HUGGINGFACE_MODELS = {
     "mistralai/Mistral-7B-Instruct-v0.3": 32768,
     "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": 32768,
     "microsoft/Phi-3.5-mini-instruct": 4096,
-    "HuggingFaceTB/SmolLM2-1.7B-Instruct": 2048,
     "google/gemma-2-2b-it": 2048,
     "openai-community/gpt2": 1024,
     "microsoft/phi-2": 2048,
@@ -245,6 +269,10 @@ HUGGINGFACE_MODELS = {
     "Qwen/Qwen2.5-7B-Instruct": 131072,
     "tiiuae/falcon-7b-instruct": 8192,
     "Qwen/QwQ-32B-preview": 32768,
+    "Qwen/Qwen2.5-VL-7B-Instruct": 64000,
+    "Qwen/qwen2.5-vl-3b-instruct": 64000,
+    "Qwen/qwen2.5-vl-32b-instruct": 8192,
+    "Qwen/qwen2.5-vl-72b-instruct": 131072,
 }
 
 # GROQ MODELS - We'll populate this dynamically
@@ -264,6 +292,8 @@ DEFAULT_GROQ_MODELS = {
     "llama-3.1-70b-specdec": 131072,
     "llama-3.2-1b-preview": 131072,
     "llama-3.2-3b-preview": 131072,
+    "llama-3.2-11b-vision": 131072,
+    "llama-3.2-90b-vision": 131072,
 }
 
 # COHERE MODELS
@@ -282,18 +312,44 @@ COHERE_MODELS = {
     "c4ai-aya-expanse-32b": 131072,
 }
 
-# …
-[…the removed model registry (GLHF); its entries are truncated in the source diff view…]
+# TOGETHER MODELS
+TOGETHER_MODELS = {
+    "meta-llama/Llama-3.1-70B-Instruct": 131072,
+    "meta-llama/Llama-3.1-8B-Instruct": 131072,
+    "meta-llama/Llama-3.3-70B-Instruct": 131072,
+    "deepseek-ai/deepseek-r1-distill-llama-70b": 8192,
+    "meta-llama/Llama-3.2-11B-Vision-Instruct": 131072,
+    "meta-llama/Llama-3.2-90B-Vision-Instruct": 131072,
+}
+
+# OVH MODELS - OVH AI Endpoints (free beta)
+OVH_MODELS = {
+    "ovh/codestral-mamba-7b-v0.1": 131072,
+    "ovh/deepseek-r1-distill-llama-70b": 8192,
+    "ovh/llama-3.1-70b-instruct": 131072,
+    "ovh/llama-3.1-8b-instruct": 131072,
+    "ovh/llama-3.3-70b-instruct": 131072,
+    "ovh/llava-next-mistral-7b": 8192,
+    "ovh/mistral-7b-instruct-v0.3": 32768,
+    "ovh/mistral-nemo-2407": 131072,
+    "ovh/mixtral-8x7b-instruct": 32768,
+    "ovh/qwen2.5-coder-32b-instruct": 32768,
+    "ovh/qwen2.5-vl-72b-instruct": 131072,
+}
+
+# CEREBRAS MODELS
+CEREBRAS_MODELS = {
+    "cerebras/llama-3.1-8b": 8192,
+    "cerebras/llama-3.3-70b": 8192,
+}
+
+# GOOGLE AI MODELS
+GOOGLEAI_MODELS = {
+    "gemini-1.0-pro": 32768,
+    "gemini-1.5-flash": 1000000,
+    "gemini-1.5-pro": 1000000,
+    "gemini-2.0-pro": 2000000,
+    "gemini-2.5-pro": 2000000,
 }
 
 # ==========================================================
@@ -323,6 +379,8 @@ def fetch_groq_models():
             context_size = 32768
         elif "gemma" in model_id:
             context_size = 8192
+        elif "vision" in model_id:
+            context_size = 131072
         else:
             context_size = 8192  # Default assumption
 
@@ -500,8 +558,14 @@ def filter_models(provider, search_term):
         all_models = list(GROQ_MODELS.keys())
     elif provider == "Cohere":
         all_models = list(COHERE_MODELS.keys())
-    elif provider == "GLHF":
-        all_models = list(…
+    elif provider == "Together":
+        all_models = list(TOGETHER_MODELS.keys())
+    elif provider == "OVH":
+        all_models = list(OVH_MODELS.keys())
+    elif provider == "Cerebras":
+        all_models = list(CEREBRAS_MODELS.keys())
+    elif provider == "GoogleAI":
+        all_models = list(GOOGLEAI_MODELS.keys())
     else:
         return [], None
 
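filter_models now dispatches through a nine-branch elif chain, and get_model_info (next hunk) repeats the same chain. The dict-backed providers could share a single name-to-registry mapping; a sketch of that refactor (PROVIDER_REGISTRIES and models_for are illustrative names, not in app.py — OpenRouter, with its list of tuples, and Groq, whose registry is fetched dynamically, would stay special-cased):

    PROVIDER_REGISTRIES = {
        "OpenAI": OPENAI_MODELS,
        "HuggingFace": HUGGINGFACE_MODELS,
        "Cohere": COHERE_MODELS,
        "Together": TOGETHER_MODELS,
        "OVH": OVH_MODELS,
        "Cerebras": CEREBRAS_MODELS,
        "GoogleAI": GOOGLEAI_MODELS,
    }

    def models_for(provider):
        # One lookup replaces the per-provider elif branches.
        registry = PROVIDER_REGISTRIES.get(provider)
        return list(registry.keys()) if registry else []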
@@ -533,9 +597,18 @@ def get_model_info(provider, model_choice):
     elif provider == "Cohere":
         if model_choice in COHERE_MODELS:
             return model_choice, COHERE_MODELS[model_choice]
-    elif provider == "GLHF":
-        if model_choice in …
-            return model_choice, …
+    elif provider == "Together":
+        if model_choice in TOGETHER_MODELS:
+            return model_choice, TOGETHER_MODELS[model_choice]
+    elif provider == "OVH":
+        if model_choice in OVH_MODELS:
+            return model_choice, OVH_MODELS[model_choice]
+    elif provider == "Cerebras":
+        if model_choice in CEREBRAS_MODELS:
+            return model_choice, CEREBRAS_MODELS[model_choice]
+    elif provider == "GoogleAI":
+        if model_choice in GOOGLEAI_MODELS:
+            return model_choice, GOOGLEAI_MODELS[model_choice]
 
     return None, 0
 
@@ -544,6 +617,18 @@ def update_context_display(provider, model_name):
     _, ctx_size = get_model_info(provider, model_name)
     return f"{ctx_size:,}" if ctx_size else "Unknown"
 
+def is_vision_model(provider, model_name):
+    """Check if a model supports vision/images"""
+    if provider in VISION_MODELS:
+        if model_name in VISION_MODELS[provider]:
+            return True
+
+    # Also check for common vision indicators in model names
+    if any(x in model_name.lower() for x in ["vl", "vision", "visual", "llava", "gemini"]):
+        return True
+
+    return False
+
 def update_model_info(provider, model_name):
     """Generate HTML info display for the selected model"""
     model_id, ctx_size = get_model_info(provider, model_name)
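The substring fallback in is_vision_model is deliberately loose: any model whose name contains "gemini" is flagged, as is anything containing "vl", so occasional false positives are possible. Illustrative checks of the two code paths, assuming the VISION_MODELS registry defined earlier:

    assert is_vision_model("Groq", "llama-3.2-11b-vision")                # registry hit
    assert is_vision_model("HuggingFace", "Qwen/qwen2.5-vl-3b-instruct")  # "vl" substring
    assert not is_vision_model("Cohere", "command-r-plus")                # no match either way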
@@ -551,22 +636,9 @@ def update_model_info(provider, model_name):
         return "<p>Model information not available</p>"
 
     # Check if this is a vision model
-…
+    is_vision = is_vision_model(provider, model_name)
 
-    # …
-    if provider == "OpenRouter":
-        for cat in OPENROUTER_MODELS:
-            if cat["category"] == "Vision Models":
-                if any(m[0] == model_name for m in cat["models"]):
-                    is_vision_model = True
-                    break
-    # For other providers, use heuristics
-    elif provider == "OpenAI" and any(x in model_name.lower() for x in ["gpt-4", "gpt-4o"]):
-        is_vision_model = True
-    elif provider == "HuggingFace" and any(x in model_name.lower() for x in ["vl", "vision"]):
-        is_vision_model = True
-
-    vision_badge = '<span style="background-color: #4CAF50; color: white; padding: 3px 6px; border-radius: 3px; font-size: 0.8em; margin-left: 5px;">Vision</span>' if is_vision_model else ''
+    vision_badge = '<span style="background-color: #4CAF50; color: white; padding: 3px 6px; border-radius: 3px; font-size: 0.8em; margin-left: 5px;">Vision</span>' if is_vision else ''
 
     # For OpenRouter, show the model ID
     model_id_html = f"<p><strong>Model ID:</strong> {model_id}</p>" if provider == "OpenRouter" else ""
@@ -581,7 +653,7 @@ def update_model_info(provider, model_name):
         {model_id_html}
         <p><strong>Context Size:</strong> {ctx_size:,} tokens</p>
         <p><strong>Provider:</strong> {provider}</p>
-        {f'<p><strong>Features:</strong> Supports image understanding</p>' if is_vision_model else ''}
+        {f'<p><strong>Features:</strong> Supports image understanding</p>' if is_vision else ''}
     </div>
     """
 
@@ -802,29 +874,23 @@ def call_cohere_api(payload, api_key_override=None):
         logger.error(f"Cohere API error: {str(e)}")
         raise e
 
-def call_glhf_api(payload, api_key_override=None):
-    """Make a call to GLHF API with error handling"""
+def call_together_api(payload, api_key_override=None):
+    """Make a call to Together API with error handling"""
     try:
         if not HAS_OPENAI:
-            raise ImportError("OpenAI package not installed (required for GLHF API)")
+            raise ImportError("OpenAI package not installed (required for Together API)")
 
-        api_key = api_key_override if api_key_override else GLHF_API_KEY
+        api_key = api_key_override if api_key_override else TOGETHER_API_KEY
         if not api_key:
-            raise ValueError("GLHF API key is required")
+            raise ValueError("Together API key is required")
 
         client = openai.OpenAI(
             api_key=api_key,
-            base_url="https://…
+            base_url="https://api.together.xyz/v1"
         )
 
         # Extract parameters from payload
-…
-        # Add "hf:" prefix if not already there
-        if not model_name.startswith("hf:"):
-            model = f"hf:{model_name}"
-        else:
-            model = model_name
-
+        model = payload.get("model", "meta-llama/Llama-3.1-8B-Instruct")
         messages = payload.get("messages", [])
         temperature = payload.get("temperature", 0.7)
         max_tokens = payload.get("max_tokens", 1000)
@@ -841,7 +907,156 @@ def call_glhf_api(payload, api_key_override=None):
 
         return response
     except Exception as e:
-        logger.error(f"GLHF API error: {str(e)}")
+        logger.error(f"Together API error: {str(e)}")
+        raise e
+
+def call_ovh_api(payload, api_key_override=None):
+    """Make a call to OVH AI Endpoints API with error handling"""
+    try:
+        # Use custom OpenAI client with the OVH endpoint
+        model = payload.get("model", "ovh/llama-3.1-8b-instruct")
+        messages = payload.get("messages", [])
+        temperature = payload.get("temperature", 0.7)
+        max_tokens = payload.get("max_tokens", 1000)
+
+        headers = {
+            "Content-Type": "application/json"
+        }
+
+        data = {
+            "model": model,
+            "messages": messages,
+            "temperature": temperature,
+            "max_tokens": max_tokens
+        }
+
+        response = requests.post(
+            "https://endpoints.ai.cloud.ovh.net/v1/chat/completions",
+            headers=headers,
+            json=data
+        )
+
+        if response.status_code != 200:
+            raise ValueError(f"OVH API returned status code {response.status_code}: {response.text}")
+
+        return response.json()
+    except Exception as e:
+        logger.error(f"OVH API error: {str(e)}")
+        raise e
+
+def call_cerebras_api(payload, api_key_override=None):
+    """Make a call to Cerebras API with error handling"""
+    try:
+        # Use vanilla requests for this API
+        model = payload.get("model", "cerebras/llama-3.1-8b")
+        messages = payload.get("messages", [])
+        temperature = payload.get("temperature", 0.7)
+        max_tokens = payload.get("max_tokens", 1000)
+
+        headers = {
+            "Content-Type": "application/json"
+        }
+
+        data = {
+            "model": model,
+            "messages": messages,
+            "temperature": temperature,
+            "max_tokens": max_tokens
+        }
+
+        response = requests.post(
+            "https://api.cloud.cerebras.ai/v1/chat/completions",
+            headers=headers,
+            json=data
+        )
+
+        if response.status_code != 200:
+            raise ValueError(f"Cerebras API returned status code {response.status_code}: {response.text}")
+
+        return response.json()
+    except Exception as e:
+        logger.error(f"Cerebras API error: {str(e)}")
+        raise e
+
+def call_googleai_api(payload, api_key_override=None):
+    """Make a call to Google AI (Gemini) API with error handling"""
+    try:
+        from google.generativeai import configure, GenerativeModel
+
+        api_key = api_key_override if api_key_override else GOOGLEAI_API_KEY
+        if not api_key:
+            raise ValueError("Google AI API key is required")
+
+        configure(api_key=api_key)
+
+        # Extract parameters from payload
+        model_name = payload.get("model", "gemini-1.5-pro")
+        messages = payload.get("messages", [])
+        temperature = payload.get("temperature", 0.7)
+
+        # Convert messages to Google AI format
+        google_messages = []
+        for msg in messages:
+            role = msg["role"]
+            content = msg["content"]
+
+            # Skip system messages for now (Gemini doesn't support them directly)
+            if role == "system":
+                continue
+
+            # Map user/assistant roles to Google's roles
+            gemini_role = "user" if role == "user" else "model"
+
+            # Process content (text or multimodal)
+            if isinstance(content, list):
+                # Multimodal content handling for Gemini
+                parts = []
+                for item in content:
+                    if item["type"] == "text":
+                        parts.append({"text": item["text"]})
+                    elif item["type"] == "image_url":
+                        image_data = item["image_url"]["url"]
+                        if image_data.startswith("data:"):
+                            # Extract base64 data
+                            mime, base64_data = image_data.split(";base64,")
+                            mime_type = mime.split(":")[1]
+                            parts.append({
+                                "inline_data": {
+                                    "mime_type": mime_type,
+                                    "data": base64_data
+                                }
+                            })
+                google_messages.append({"role": gemini_role, "parts": parts})
+            else:
+                # Simple text content
+                google_messages.append({"role": gemini_role, "parts": [{"text": content}]})
+
+        # Create Gemini model
+        model = GenerativeModel(model_name)
+
+        # Generate content
+        response = model.generate_content(
+            google_messages,
+            generation_config={
+                "temperature": temperature,
+                "max_output_tokens": payload.get("max_tokens", 1000),
+                "top_p": payload.get("top_p", 0.95),
+            }
+        )
+
+        # Convert response to standard format
+        return {
+            "choices": [
+                {
+                    "message": {
+                        "role": "assistant",
+                        "content": response.text
+                    }
+                }
+            ]
+        }
+    except Exception as e:
+        logger.error(f"Google AI API error: {str(e)}")
         raise e
 
 def extract_ai_response(result, provider):
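Two observations on the call functions above. First, call_ovh_api and call_cerebras_api accept an api_key_override parameter but never attach an Authorization header, so any key passed to them is effectively ignored on those paths. Second, the heart of call_googleai_api is the message translation: OpenAI-style {"role", "content"} dicts become Gemini {"role", "parts"} dicts, system turns are dropped, and "assistant" maps to "model". A condensed, text-only sketch of that transform (to_gemini_format is an illustrative name, not part of app.py):

    def to_gemini_format(messages):
        out = []
        for msg in messages:
            if msg["role"] == "system":
                continue  # Gemini has no direct system role in this scheme
            role = "user" if msg["role"] == "user" else "model"
            out.append({"role": role, "parts": [{"text": msg["content"]}]})
        return out

    # to_gemini_format([{"role": "user", "content": "hi"}])
    # -> [{"role": "user", "parts": [{"text": "hi"}]}]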
@@ -880,9 +1095,21 @@ def extract_ai_response(result, provider):
         if hasattr(result, "text"):
             return result.text
 
-    elif provider == "GLHF":
+    elif provider == "Together":
         if hasattr(result, "choices") and len(result.choices) > 0:
             return result.choices[0].message.content
+
+    elif provider == "OVH":
+        if isinstance(result, dict) and "choices" in result and len(result["choices"]) > 0:
+            return result["choices"][0]["message"]["content"]
+
+    elif provider == "Cerebras":
+        if isinstance(result, dict) and "choices" in result and len(result["choices"]) > 0:
+            return result["choices"][0]["message"]["content"]
+
+    elif provider == "GoogleAI":
+        if isinstance(result, dict) and "choices" in result and len(result["choices"]) > 0:
+            return result["choices"][0]["message"]["content"]
 
     logger.error(f"Unexpected response structure from {provider}: {result}")
     return f"Error: Could not extract response from {provider} API result"
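The branching here mirrors the return types of the call functions: the Together branch inspects an OpenAI SDK object (attribute access), while the OVH, Cerebras, and GoogleAI branches inspect plain dicts, since those helpers return response.json() or a hand-built dict. Both shapes can be handled by one normalizer; a sketch (first_message_content is an illustrative name, not part of app.py):

    def first_message_content(result):
        # OpenAI-SDK-style object: result.choices[0].message.content
        if hasattr(result, "choices") and result.choices:
            return result.choices[0].message.content
        # Plain JSON dict: result["choices"][0]["message"]["content"]
        if isinstance(result, dict) and result.get("choices"):
            return result["choices"][0]["message"]["content"]
        return None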
@@ -969,7 +1196,7 @@ def groq_streaming_handler(response, chatbot, message_idx, message):
         chatbot[-1][1] += f"\n\nError during streaming: {str(e)}"
         yield chatbot
 
-def glhf_streaming_handler(response, chatbot, message_idx, message):
+def together_streaming_handler(response, chatbot, message_idx, message):
     try:
         # First add the user message if needed
         if len(chatbot) == message_idx:
@@ -984,7 +1211,7 @@ def glhf_streaming_handler(response, chatbot, message_idx, message):
         yield chatbot
 
     except Exception as e:
-        logger.error(f"Error in GLHF streaming handler: {str(e)}")
+        logger.error(f"Error in Together streaming handler: {str(e)}")
         # Add error message to the current response
         chatbot[-1][1] += f"\n\nError during streaming: {str(e)}"
         yield chatbot
@@ -1279,28 +1506,28 @@ def ask_ai(message, history, provider, model_choice, temperature, max_tokens, to
             chat_history.append([message, error_message])
             return chat_history
 
-    elif provider == "GLHF":
+    elif provider == "Together":
         # Get model ID from registry
         model_id, _ = get_model_info(provider, model_choice)
         if not model_id:
-            error_message = f"Error: Model '{model_choice}' not found in GLHF"
+            error_message = f"Error: Model '{model_choice}' not found in Together"
             chat_history.append([message, error_message])
             return chat_history
 
-        # Build GLHF payload
+        # Build Together payload
         payload = {
-            "model": model_id,
+            "model": model_id,
             "messages": messages,
             "temperature": temperature,
             "max_tokens": max_tokens,
             "stream": stream_output
         }
 
-        # Call GLHF API
-        logger.info(f"Sending request to GLHF model: {model_id}")
+        # Call Together API
+        logger.info(f"Sending request to Together model: {model_id}")
 
         try:
-            response = call_glhf_api(payload, api_key_override)
+            response = call_together_api(payload, api_key_override)
 
             # Handle streaming response
             if stream_output:
@@ -1309,7 +1536,7 @@ def ask_ai(message, history, provider, model_choice, temperature, max_tokens, to
 
             # Set up generator for streaming updates
             def streaming_generator():
-                for updated_history in glhf_streaming_handler(response, chat_history, len(chat_history) - 1, message):
+                for updated_history in together_streaming_handler(response, chat_history, len(chat_history) - 1, message):
                     yield updated_history
 
             return streaming_generator()
@@ -1320,7 +1547,104 @@ def ask_ai(message, history, provider, model_choice, temperature, max_tokens, to
             chat_history.append([message, ai_response])
             return chat_history
         except Exception as e:
-            error_message = f"GLHF API Error: {str(e)}"
+            error_message = f"Together API Error: {str(e)}"
+            logger.error(error_message)
+            chat_history.append([message, error_message])
+            return chat_history
+
+    elif provider == "OVH":
+        # Get model ID from registry
+        model_id, _ = get_model_info(provider, model_choice)
+        if not model_id:
+            error_message = f"Error: Model '{model_choice}' not found in OVH"
+            chat_history.append([message, error_message])
+            return chat_history
+
+        # Build OVH payload
+        payload = {
+            "model": model_id,
+            "messages": messages,
+            "temperature": temperature,
+            "max_tokens": max_tokens
+        }
+
+        # Call OVH API
+        logger.info(f"Sending request to OVH model: {model_id}")
+
+        try:
+            response = call_ovh_api(payload)
+
+            # Extract response
+            ai_response = extract_ai_response(response, provider)
+            chat_history.append([message, ai_response])
+            return chat_history
+        except Exception as e:
+            error_message = f"OVH API Error: {str(e)}"
+            logger.error(error_message)
+            chat_history.append([message, error_message])
+            return chat_history
+
+    elif provider == "Cerebras":
+        # Get model ID from registry
+        model_id, _ = get_model_info(provider, model_choice)
+        if not model_id:
+            error_message = f"Error: Model '{model_choice}' not found in Cerebras"
+            chat_history.append([message, error_message])
+            return chat_history
+
+        # Build Cerebras payload
+        payload = {
+            "model": model_id,
+            "messages": messages,
+            "temperature": temperature,
+            "max_tokens": max_tokens
+        }
+
+        # Call Cerebras API
+        logger.info(f"Sending request to Cerebras model: {model_id}")
+
+        try:
+            response = call_cerebras_api(payload)
+
+            # Extract response
+            ai_response = extract_ai_response(response, provider)
+            chat_history.append([message, ai_response])
+            return chat_history
+        except Exception as e:
+            error_message = f"Cerebras API Error: {str(e)}"
+            logger.error(error_message)
+            chat_history.append([message, error_message])
+            return chat_history
+
+    elif provider == "GoogleAI":
+        # Get model ID from registry
+        model_id, _ = get_model_info(provider, model_choice)
+        if not model_id:
+            error_message = f"Error: Model '{model_choice}' not found in GoogleAI"
+            chat_history.append([message, error_message])
+            return chat_history
+
+        # Build GoogleAI payload
+        payload = {
+            "model": model_id,
+            "messages": messages,
+            "temperature": temperature,
+            "max_tokens": max_tokens,
+            "top_p": top_p
+        }
+
+        # Call GoogleAI API
+        logger.info(f"Sending request to GoogleAI model: {model_id}")
+
+        try:
+            response = call_googleai_api(payload, api_key_override)
+
+            # Extract response
+            ai_response = extract_ai_response(response, provider)
+            chat_history.append([message, ai_response])
+            return chat_history
+        except Exception as e:
+            error_message = f"GoogleAI API Error: {str(e)}"
             logger.error(error_message)
             chat_history.append([message, error_message])
             return chat_history
@@ -1383,7 +1707,7 @@ def create_app():
     gr.Markdown("""
     # 🤖 Multi-Provider CrispChat
 
-    Chat with AI models from multiple providers: OpenRouter, OpenAI, HuggingFace, Groq, Cohere, and GLHF.
+    Chat with AI models from multiple providers: OpenRouter, OpenAI, HuggingFace, Groq, Cohere, Together, OVH, Cerebras, and Google AI.
     """)
 
     with gr.Row():
@@ -1394,7 +1718,6 @@ def create_app():
         show_copy_button=True,
         show_label=False,
         avatar_images=(None, "https://upload.wikimedia.org/wikipedia/commons/0/04/ChatGPT_logo.svg"),
-        type="messages",
         elem_id="chat-window"
     )
 
@@ -1414,7 +1737,8 @@ def create_app():
         with gr.Column(scale=1):
             clear_btn = gr.Button("Clear Chat", variant="secondary")
 
-…
+    # Container for conditionally showing image upload
+    with gr.Row(visible=True) as image_upload_container:
         # Image upload
         with gr.Accordion("Upload Images (for vision models)", open=False):
             images = gr.File(
@@ -1443,7 +1767,7 @@ def create_app():
 
     # Provider selection
     provider_choice = gr.Radio(
-        choices=["OpenRouter", "OpenAI", "HuggingFace", "Groq", "Cohere", "GLHF"],
+        choices=["OpenRouter", "OpenAI", "HuggingFace", "Groq", "Cohere", "Together", "OVH", "Cerebras", "GoogleAI"],
         value="OpenRouter",
         label="AI Provider"
     )
@@ -1506,11 +1830,35 @@ def create_app():
         visible=False
     )
 
-    glhf_model = gr.Dropdown(
-        choices=list(…
-        value="…
-        label="GLHF Model",
-        elem_id="glhf-model-choice",
+    together_model = gr.Dropdown(
+        choices=list(TOGETHER_MODELS.keys()),
+        value="meta-llama/Llama-3.1-8B-Instruct" if "meta-llama/Llama-3.1-8B-Instruct" in TOGETHER_MODELS else None,
+        label="Together Model",
+        elem_id="together-model-choice",
+        visible=False
+    )
+
+    ovh_model = gr.Dropdown(
+        choices=list(OVH_MODELS.keys()),
+        value="ovh/llama-3.1-8b-instruct" if "ovh/llama-3.1-8b-instruct" in OVH_MODELS else None,
+        label="OVH Model",
+        elem_id="ovh-model-choice",
+        visible=False
+    )
+
+    cerebras_model = gr.Dropdown(
+        choices=list(CEREBRAS_MODELS.keys()),
+        value="cerebras/llama-3.1-8b" if "cerebras/llama-3.1-8b" in CEREBRAS_MODELS else None,
+        label="Cerebras Model",
+        elem_id="cerebras-model-choice",
+        visible=False
+    )
+
+    googleai_model = gr.Dropdown(
+        choices=list(GOOGLEAI_MODELS.keys()),
+        value="gemini-1.5-pro" if "gemini-1.5-pro" in GOOGLEAI_MODELS else None,
+        label="Google AI Model",
+        elem_id="googleai-model-choice",
         visible=False
     )
 
@@ -1652,6 +2000,12 @@ def create_app():
     model_info_display = gr.HTML(
         value=update_model_info("OpenRouter", OPENROUTER_ALL_MODELS[0][0])
     )
+
+    is_vision_indicator = gr.Checkbox(
+        label="Supports Images",
+        value=is_vision_model("OpenRouter", OPENROUTER_ALL_MODELS[0][0]),
+        interactive=False
+    )
 
     # Add usage instructions
     with gr.Accordion("Usage Instructions", open=False):
@@ -1671,7 +2025,10 @@ def create_app():
     - **HuggingFace**: Direct access to open models, some models require API key
     - **Groq**: High-performance inference, requires API key
     - **Cohere**: Specialized in language understanding, requires API key
-    - **GLHF**: …
+    - **Together**: Access to high-quality open models, requires API key
+    - **OVH**: Free beta access to several models
+    - **Cerebras**: Free tier available with 8K context limit
+    - **GoogleAI**: Google's Gemini models, requires API key
 
     ## Advanced Parameters
     - **Temperature**: Controls randomness (higher = more creative, lower = more deterministic)
@@ -1683,23 +2040,26 @@ def create_app():
     # Add a footer with version info
     footer_md = gr.Markdown("""
     ---
-    ### Multi-Provider CrispChat v1.0
+    ### Multi-Provider CrispChat v1.1
    Built with ❤️ using Gradio and multiple AI provider APIs | Context sizes shown next to model names
     """)
 
     # Define event handlers
     def toggle_model_dropdowns(provider):
         """Show/hide model dropdowns based on provider selection"""
-        return [
-            gr.update(visible=(provider == "OpenRouter")),
-            gr.update(visible=(provider == "OpenAI")),
-            gr.update(visible=(provider == "HuggingFace")),
-            gr.update(visible=(provider == "Groq")),
-            gr.update(visible=(provider == "Cohere")),
-            gr.update(visible=(provider == "GLHF"))
-        ]
+        return {
+            openrouter_model: gr.update(visible=(provider == "OpenRouter")),
+            openai_model: gr.update(visible=(provider == "OpenAI")),
+            hf_model: gr.update(visible=(provider == "HuggingFace")),
+            groq_model: gr.update(visible=(provider == "Groq")),
+            cohere_model: gr.update(visible=(provider == "Cohere")),
+            together_model: gr.update(visible=(provider == "Together")),
+            ovh_model: gr.update(visible=(provider == "OVH")),
+            cerebras_model: gr.update(visible=(provider == "Cerebras")),
+            googleai_model: gr.update(visible=(provider == "GoogleAI"))
+        }
 
-    def update_context_for_provider(provider, openrouter_model, openai_model, hf_model, groq_model, cohere_model, glhf_model):
+    def update_context_for_provider(provider, openrouter_model, openai_model, hf_model, groq_model, cohere_model, together_model, ovh_model, cerebras_model, googleai_model):
         """Update context display based on selected provider and model"""
         if provider == "OpenRouter":
             return update_context_display(provider, openrouter_model)
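Returning a dict keyed by component, rather than the old positional list, lets the event update each dropdown by identity, which is less brittle as providers are added. A minimal self-contained illustration of the same Gradio pattern (assuming a current gradio install; the component names here are illustrative):

    import gradio as gr

    with gr.Blocks() as demo:
        provider = gr.Radio(["A", "B"], value="A")
        box_a = gr.Textbox(visible=True)
        box_b = gr.Textbox(visible=False)

        def toggle(p):
            # Dict-of-updates: keys are the components themselves
            return {box_a: gr.update(visible=(p == "A")),
                    box_b: gr.update(visible=(p == "B"))}

        provider.change(fn=toggle, inputs=provider, outputs=[box_a, box_b])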
@@ -1711,11 +2071,17 @@ def create_app():
             return update_context_display(provider, groq_model)
         elif provider == "Cohere":
             return update_context_display(provider, cohere_model)
-        elif provider == "GLHF":
-            return update_context_display(provider, glhf_model)
+        elif provider == "Together":
+            return update_context_display(provider, together_model)
+        elif provider == "OVH":
+            return update_context_display(provider, ovh_model)
+        elif provider == "Cerebras":
+            return update_context_display(provider, cerebras_model)
+        elif provider == "GoogleAI":
+            return update_context_display(provider, googleai_model)
         return "Unknown"
 
-    def update_model_info_for_provider(provider, openrouter_model, openai_model, hf_model, groq_model, cohere_model, glhf_model):
+    def update_model_info_for_provider(provider, openrouter_model, openai_model, hf_model, groq_model, cohere_model, together_model, ovh_model, cerebras_model, googleai_model):
         """Update model info based on selected provider and model"""
         if provider == "OpenRouter":
             return update_model_info(provider, openrouter_model)
@@ -1727,79 +2093,165 @@ def create_app():
             return update_model_info(provider, groq_model)
         elif provider == "Cohere":
             return update_model_info(provider, cohere_model)
-        elif provider == "GLHF":
-            return update_model_info(provider, glhf_model)
+        elif provider == "Together":
+            return update_model_info(provider, together_model)
+        elif provider == "OVH":
+            return update_model_info(provider, ovh_model)
+        elif provider == "Cerebras":
+            return update_model_info(provider, cerebras_model)
+        elif provider == "GoogleAI":
+            return update_model_info(provider, googleai_model)
         return "<p>Model information not available</p>"
+
+    def update_vision_indicator(provider, model_choice):
+        """Update the vision capability indicator"""
+        return is_vision_model(provider, model_choice)
+
+    def update_image_upload_visibility(provider, model_choice):
+        """Show/hide image upload based on model vision capabilities"""
+        is_vision = is_vision_model(provider, model_choice)
+        return gr.update(visible=is_vision)
 
-…
-    def …
-        """Filter models …
-        …
 
-        if …
-            …
-            …
-            …
-        else:
-            filtered_models = all_models
 
-        …
-    […the remainder of the old combined search handler is truncated in the source diff view…]
+    # Search model function - FIXED FUNCTION
+    def search_openrouter_models(search_term):
+        """Filter OpenRouter models based on search term"""
+        all_models = [model[0] for model in OPENROUTER_ALL_MODELS]
+        if not search_term:
+            return gr.update(choices=all_models, value=all_models[0] if all_models else None)
+
+        filtered_models = [model for model in all_models if search_term.lower() in model.lower()]
+
+        if filtered_models:
+            return gr.update(choices=filtered_models, value=filtered_models[0])
+        else:
+            return gr.update(choices=all_models, value=all_models[0] if all_models else None)
+
+    def search_openai_models(search_term):
+        """Filter OpenAI models based on search term"""
+        all_models = list(OPENAI_MODELS.keys())
+        if not search_term:
+            return gr.update(choices=all_models, value="gpt-3.5-turbo" if "gpt-3.5-turbo" in all_models else all_models[0] if all_models else None)
+
+        filtered_models = [model for model in all_models if search_term.lower() in model.lower()]
+
+        if filtered_models:
+            return gr.update(choices=filtered_models, value=filtered_models[0])
+        else:
+            return gr.update(choices=all_models, value="gpt-3.5-turbo" if "gpt-3.5-turbo" in all_models else all_models[0] if all_models else None)
+
+    def search_hf_models(search_term):
+        """Filter HuggingFace models based on search term"""
+        all_models = list(HUGGINGFACE_MODELS.keys())
+        if not search_term:
+            default_model = "mistralai/Mistral-7B-Instruct-v0.3" if "mistralai/Mistral-7B-Instruct-v0.3" in all_models else all_models[0] if all_models else None
+            return gr.update(choices=all_models, value=default_model)
+
+        filtered_models = [model for model in all_models if search_term.lower() in model.lower()]
+
+        if filtered_models:
+            return gr.update(choices=filtered_models, value=filtered_models[0])
+        else:
+            default_model = "mistralai/Mistral-7B-Instruct-v0.3" if "mistralai/Mistral-7B-Instruct-v0.3" in all_models else all_models[0] if all_models else None
+            return gr.update(choices=all_models, value=default_model)
+
+    def search_groq_models(search_term):
+        """Filter Groq models based on search term"""
+        all_models = list(GROQ_MODELS.keys())
+        if not search_term:
+            default_model = "llama-3.1-8b-instant" if "llama-3.1-8b-instant" in all_models else all_models[0] if all_models else None
+            return gr.update(choices=all_models, value=default_model)
+
+        filtered_models = [model for model in all_models if search_term.lower() in model.lower()]
+
+        if filtered_models:
+            return gr.update(choices=filtered_models, value=filtered_models[0])
+        else:
+            default_model = "llama-3.1-8b-instant" if "llama-3.1-8b-instant" in all_models else all_models[0] if all_models else None
+            return gr.update(choices=all_models, value=default_model)
+
+    def search_cohere_models(search_term):
+        """Filter Cohere models based on search term"""
+        all_models = list(COHERE_MODELS.keys())
+        if not search_term:
+            default_model = "command-r-plus" if "command-r-plus" in all_models else all_models[0] if all_models else None
+            return gr.update(choices=all_models, value=default_model)
+
+        filtered_models = [model for model in all_models if search_term.lower() in model.lower()]
+
+        if filtered_models:
+            return gr.update(choices=filtered_models, value=filtered_models[0])
+        else:
+            default_model = "command-r-plus" if "command-r-plus" in all_models else all_models[0] if all_models else None
+            return gr.update(choices=all_models, value=default_model)
+
+    def search_together_models(search_term):
+        """Filter Together models based on search term"""
+        all_models = list(TOGETHER_MODELS.keys())
+        if not search_term:
+            default_model = "meta-llama/Llama-3.1-8B-Instruct" if "meta-llama/Llama-3.1-8B-Instruct" in all_models else all_models[0] if all_models else None
+            return gr.update(choices=all_models, value=default_model)
+
+        filtered_models = [model for model in all_models if search_term.lower() in model.lower()]
+
+        if filtered_models:
+            return gr.update(choices=filtered_models, value=filtered_models[0])
+        else:
+            default_model = "meta-llama/Llama-3.1-8B-Instruct" if "meta-llama/Llama-3.1-8B-Instruct" in all_models else all_models[0] if all_models else None
+            return gr.update(choices=all_models, value=default_model)
+
+    def search_ovh_models(search_term):
+        """Filter OVH models based on search term"""
+        all_models = list(OVH_MODELS.keys())
+        if not search_term:
+            default_model = "ovh/llama-3.1-8b-instruct" if "ovh/llama-3.1-8b-instruct" in all_models else all_models[0] if all_models else None
+            return gr.update(choices=all_models, value=default_model)
+
+        filtered_models = [model for model in all_models if search_term.lower() in model.lower()]
+
+        if filtered_models:
+            return gr.update(choices=filtered_models, value=filtered_models[0])
+        else:
+            default_model = "ovh/llama-3.1-8b-instruct" if "ovh/llama-3.1-8b-instruct" in all_models else all_models[0] if all_models else None
+            return gr.update(choices=all_models, value=default_model)
+
+    def search_cerebras_models(search_term):
+        """Filter Cerebras models based on search term"""
+        all_models = list(CEREBRAS_MODELS.keys())
+        if not search_term:
+            default_model = "cerebras/llama-3.1-8b" if "cerebras/llama-3.1-8b" in all_models else all_models[0] if all_models else None
+            return gr.update(choices=all_models, value=default_model)
+
+        filtered_models = [model for model in all_models if search_term.lower() in model.lower()]
+
+        if filtered_models:
+            return gr.update(choices=filtered_models, value=filtered_models[0])
+        else:
+            default_model = "cerebras/llama-3.1-8b" if "cerebras/llama-3.1-8b" in all_models else all_models[0] if all_models else None
+            return gr.update(choices=all_models, value=default_model)
+
+    def search_googleai_models(search_term):
+        """Filter GoogleAI models based on search term"""
+        all_models = list(GOOGLEAI_MODELS.keys())
+        if not search_term:
+            default_model = "gemini-1.5-pro" if "gemini-1.5-pro" in all_models else all_models[0] if all_models else None
+            return gr.update(choices=all_models, value=default_model)
+
+        filtered_models = [model for model in all_models if search_term.lower() in model.lower()]
+
+        if filtered_models:
+            return gr.update(choices=filtered_models, value=filtered_models[0])
+        else:
+            default_model = "gemini-1.5-pro" if "gemini-1.5-pro" in all_models else all_models[0] if all_models else None
+            return gr.update(choices=all_models, value=default_model)
 
     def refresh_groq_models_list():
         """Refresh the list of Groq models"""
         global GROQ_MODELS
         GROQ_MODELS = fetch_groq_models()
         return gr.update(choices=list(GROQ_MODELS.keys()))
-…
-    def get_current_model(provider, openrouter_model, openai_model, hf_model, groq_model, cohere_model, glhf_model):
+
+    def get_current_model(provider, openrouter_model, openai_model, hf_model, groq_model, cohere_model, together_model, ovh_model, cerebras_model, googleai_model):
         """Get the currently selected model based on provider"""
         if provider == "OpenRouter":
             return openrouter_model
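The nine search_* callbacks above differ only in their registry and preferred default, so they could be stamped out by a factory; a sketch (make_search_fn is an illustrative name, not in app.py):

    def make_search_fn(all_models, default=None):
        """Build one dropdown-filter callback from a model list and a default."""
        fallback = default if default in all_models else (all_models[0] if all_models else None)

        def search(term):
            if not term:
                return gr.update(choices=all_models, value=fallback)
            hits = [m for m in all_models if term.lower() in m.lower()]
            if hits:
                return gr.update(choices=hits, value=hits[0])
            return gr.update(choices=all_models, value=fallback)

        return search

    # e.g. search_cohere_models = make_search_fn(list(COHERE_MODELS.keys()), "command-r-plus")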
@@ -1811,8 +2263,14 @@ def create_app():
             return groq_model
         elif provider == "Cohere":
             return cohere_model
-        elif provider == "GLHF":
-            return glhf_model
+        elif provider == "Together":
+            return together_model
+        elif provider == "OVH":
+            return ovh_model
+        elif provider == "Cerebras":
+            return cerebras_model
+        elif provider == "GoogleAI":
+            return googleai_model
         return None
 
     # Process uploaded images
@@ -1826,26 +2284,63 @@ def create_app():
     provider_choice.change(
         fn=toggle_model_dropdowns,
         inputs=provider_choice,
-        outputs=[openrouter_model, openai_model, hf_model, groq_model, cohere_model, glhf_model]
+        outputs=[openrouter_model, openai_model, hf_model, groq_model, cohere_model, together_model, ovh_model, cerebras_model, googleai_model]
     ).then(
         fn=update_context_for_provider,
-        inputs=[provider_choice, openrouter_model, openai_model, hf_model, groq_model, cohere_model, glhf_model],
+        inputs=[provider_choice, openrouter_model, openai_model, hf_model, groq_model, cohere_model, together_model, ovh_model, cerebras_model, googleai_model],
         outputs=context_display
     ).then(
         fn=update_model_info_for_provider,
-        inputs=[provider_choice, openrouter_model, openai_model, hf_model, groq_model, cohere_model, glhf_model],
+        inputs=[provider_choice, openrouter_model, openai_model, hf_model, groq_model, cohere_model, together_model, ovh_model, cerebras_model, googleai_model],
         outputs=model_info_display
+        […further added .then() wiring (vision indicator and image-upload visibility) truncated in the source diff view…]
     )
 
     # Set up model search event - FIXED VERSION
-    # …
     model_search.change(
-        fn=…
+        fn=[…new provider-aware search dispatch truncated in the source diff view…]
         inputs=[provider_choice, model_search],
-        outputs=[…
+        outputs=[…truncated in the source diff view…]
     )
 
     # Set up model change events
     openrouter_model.change(
         fn=lambda model: update_context_display("OpenRouter", model),
         inputs=openrouter_model,
@@ -1854,6 +2349,14 @@ def create_app():
         fn=lambda model: update_model_info("OpenRouter", model),
         inputs=openrouter_model,
         outputs=model_info_display
+        […eight added lines truncated in the source diff view; by the surrounding pattern, presumably update_vision_indicator and update_image_upload_visibility wiring…]
     )
 
     openai_model.change(
@@ -1864,6 +2367,14 @@ def create_app():
         fn=lambda model: update_model_info("OpenAI", model),
         inputs=openai_model,
         outputs=model_info_display
+        […eight added lines truncated in the source diff view…]
     )
 
     hf_model.change(
@@ -1874,6 +2385,14 @@ def create_app():
         fn=lambda model: update_model_info("HuggingFace", model),
         inputs=hf_model,
         outputs=model_info_display
+        […eight added lines truncated in the source diff view…]
     )
 
     groq_model.change(
@@ -1884,6 +2403,14 @@ def create_app():
         fn=lambda model: update_model_info("Groq", model),
         inputs=groq_model,
         outputs=model_info_display
+        […eight added lines truncated in the source diff view…]
     )
 
     cohere_model.change(
@@ -1894,50 +2421,102 @@ def create_app():
         fn=lambda model: update_model_info("Cohere", model),
         inputs=cohere_model,
         outputs=model_info_display
+        […added vision-indicator wiring truncated in the source diff view…]
     )
 
-    glhf_model.change(
-        fn=lambda model: update_context_display("GLHF", model),
-        inputs=glhf_model,
+    together_model.change(
+        fn=lambda model: update_context_display("Together", model),
+        inputs=together_model,
         outputs=context_display
     ).then(
-        fn=lambda model: update_model_info("GLHF", model),
-        inputs=glhf_model,
+        fn=lambda model: update_model_info("Together", model),
+        inputs=together_model,
         outputs=model_info_display
     )
 
-    […eighteen further removed lines truncated in the source diff view…]
+    […equivalent added .change() wiring for ovh_model, cerebras_model, and googleai_model truncated in the source diff view…]
 
     # Set up submission event
-    def submit_message(message, history, provider, openrouter_model, openai_model, hf_model, groq_model, cohere_model, glhf_model,
+    def submit_message(message, history, provider, openrouter_model, openai_model, hf_model, groq_model, cohere_model, together_model, ovh_model, cerebras_model, googleai_model,
                        temperature, max_tokens, top_p, frequency_penalty, presence_penalty, repetition_penalty,
                        top_k, min_p, seed, top_a, stream_output, response_format,
                        images, documents, reasoning_effort, system_message, transforms, api_key_override):
         """Submit message to selected provider and model"""
         # Get the currently selected model
-        model_choice = get_current_model(provider, openrouter_model, openai_model, hf_model, groq_model, cohere_model, glhf_model)
+        model_choice = get_current_model(provider, openrouter_model, openai_model, hf_model, groq_model, cohere_model, together_model, ovh_model, cerebras_model, googleai_model)
 
         # Check if model is selected
         if not model_choice:
-            history.…
-            …
+            […replacement error handling truncated in the source diff view…]
 
         # Call the ask_ai function with the appropriate parameters
         return ask_ai(
@@ -1970,7 +2549,7 @@ def create_app():
         fn=submit_message,
         inputs=[
             message, chatbot, provider_choice,
-            openrouter_model, openai_model, hf_model, groq_model, cohere_model, glhf_model,
+            openrouter_model, openai_model, hf_model, groq_model, cohere_model, together_model, ovh_model, cerebras_model, googleai_model,
             temperature, max_tokens, top_p, frequency_penalty, presence_penalty, repetition_penalty,
             top_k, min_p, seed, top_a, stream_output, response_format,
             images, documents, reasoning_effort, system_message, transforms, api_key_override
@@ -1988,7 +2567,7 @@ def create_app():
         fn=submit_message,
         inputs=[
             message, chatbot, provider_choice,
-            openrouter_model, openai_model, hf_model, groq_model, cohere_model, glhf_model,
+            openrouter_model, openai_model, hf_model, groq_model, cohere_model, together_model, ovh_model, cerebras_model, googleai_model,
             temperature, max_tokens, top_p, frequency_penalty, presence_penalty, repetition_penalty,
             top_k, min_p, seed, top_a, stream_output, response_format,
             images, documents, reasoning_effort, system_message, transforms, api_key_override
@@ -2036,9 +2615,13 @@ if __name__ == "__main__":
         logger.warning("WARNING: COHERE_API_KEY environment variable is not set")
         missing_keys.append("Cohere")
 
-    if not GLHF_API_KEY:
-        logger.warning("WARNING: GLHF_API_KEY environment variable is not set")
-        missing_keys.append("GLHF")
+    […added key checks truncated in the source diff view; by the surrounding pattern, presumably TOGETHER_API_KEY and GOOGLEAI_API_KEY warnings…]
 
     if missing_keys:
         print("Missing API keys for the following providers:")
@@ -2049,6 +2632,9 @@ if __name__ == "__main__":
 
     if "OpenRouter" in missing_keys:
         print("\nNote: OpenRouter offers free tier access to many models!")
+        […three added lines truncated in the source diff view…]
 
     print("\nStarting Multi-Provider CrispChat application...")
     demo = create_app()
|
71 |
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "")
|
72 |
GROQ_API_KEY = os.environ.get("GROQ_API_KEY", "")
|
73 |
COHERE_API_KEY = os.environ.get("COHERE_API_KEY", "")
|
|
|
74 |
HF_API_KEY = os.environ.get("HF_API_KEY", "")
|
75 |
+
TOGETHER_API_KEY = os.environ.get("TOGETHER_API_KEY", "")
|
76 |
+
GOOGLEAI_API_KEY = os.environ.get("GOOGLEAI_API_KEY", "")
|
77 |
+
|
78 |
+
# Print application startup message with timestamp
|
79 |
+
current_time = time.strftime("%Y-%m-%d %H:%M:%S")
|
80 |
+
print(f"===== Application Startup at {current_time} =====\n")
|
81 |
|
82 |
# ==========================================================
|
83 |
# MODEL DEFINITIONS
|
|
|
88 |
OPENROUTER_MODELS = [
|
89 |
# 1M+ Context Models
|
90 |
{"category": "1M+ Context", "models": [
|
91 |
+
("Google: Gemini Pro 2.0 Experimental", "google/gemini-2.0-pro-exp-02-05:free", 2000000),
|
92 |
("Google: Gemini 2.0 Flash Thinking Experimental 01-21", "google/gemini-2.0-flash-thinking-exp:free", 1048576),
|
93 |
("Google: Gemini Flash 2.0 Experimental", "google/gemini-2.0-flash-exp:free", 1048576),
|
94 |
("Google: Gemini Pro 2.5 Experimental", "google/gemini-2.5-pro-exp-03-25:free", 1000000),
|
|
|
169 |
|
170 |
# Vision-capable Models
|
171 |
{"category": "Vision Models", "models": [
|
172 |
+
("Google: Gemini Pro 2.0 Experimental", "google/gemini-2.0-pro-exp-02-05:free", 2000000),
|
173 |
("Google: Gemini 2.0 Flash Thinking Experimental 01-21", "google/gemini-2.0-flash-thinking-exp:free", 1048576),
|
174 |
("Google: Gemini Flash 2.0 Experimental", "google/gemini-2.0-flash-exp:free", 1048576),
|
175 |
("Google: Gemini Pro 2.5 Experimental", "google/gemini-2.5-pro-exp-03-25:free", 1000000),
|
|
|
198 |
if model not in OPENROUTER_ALL_MODELS: # Avoid duplicates
|
199 |
OPENROUTER_ALL_MODELS.append(model)
|
200 |
|
201 |
+
# VISION MODELS - For tracking which models support images
|
202 |
+
VISION_MODELS = {
|
203 |
+
"OpenRouter": [model[0] for model in OPENROUTER_MODELS[-1]["models"]], # Last category is Vision Models
|
204 |
+
"OpenAI": [
|
205 |
+
"gpt-4-vision-preview", "gpt-4o", "gpt-4o-mini", "gpt-4-turbo",
|
206 |
+
"gpt-4-turbo-preview", "gpt-4-0125-preview", "gpt-4-1106-preview",
|
207 |
+
"o1-preview", "o1-mini"
|
208 |
+
],
|
209 |
+
"HuggingFace": [
|
210 |
+
"Qwen/Qwen2.5-VL-7B-Instruct", "Qwen/qwen2.5-vl-3b-instruct",
|
211 |
+
"Qwen/qwen2.5-vl-32b-instruct", "Qwen/qwen2.5-vl-72b-instruct"
|
212 |
+
],
|
213 |
+
"Groq": ["llama-3.2-11b-vision", "llama-3.2-90b-vision"],
|
214 |
+
"Together": ["Llama-3.2-11B-Vision-Instruct", "Llama-3.2-90B-Vision-Instruct"],
|
215 |
+
"OVH": ["llava-next-mistral-7b", "qwen2.5-vl-72b-instruct"],
|
216 |
+
"Cerebras": [],
|
217 |
+
"GoogleAI": ["gemini-1.5-pro", "gemini-1.0-pro", "gemini-1.5-flash", "gemini-2.0-pro", "gemini-2.5-pro"]
|
218 |
+
}
|
219 |
+
|
220 |
+
# Add all models with "vl", "vision", "visual" in their name to HF vision models
|
221 |
+
for model_name in list(HUGGINGFACE_MODELS.keys()):
|
222 |
+
if any(x in model_name.lower() for x in ["vl", "vision", "visual", "llava"]):
|
223 |
+
if model_name not in VISION_MODELS["HuggingFace"]:
|
224 |
+
VISION_MODELS["HuggingFace"].append(model_name)
|
225 |
+
|
226 |
# OPENAI MODELS
|
227 |
OPENAI_MODELS = {
|
228 |
"gpt-3.5-turbo": 16385,
|
|
|
241 |
"gpt-4o-2024-11-20": 128000,
|
242 |
"gpt-4o-2024-08-06": 128000,
|
243 |
"gpt-4o-2024-05-13": 128000,
|
|
|
244 |
"gpt-4o-mini": 128000,
|
245 |
"gpt-4o-mini-2024-07-18": 128000,
|
|
|
|
|
|
|
|
|
246 |
"o1-preview": 128000,
|
247 |
"o1-preview-2024-09-12": 128000,
|
248 |
"o1-mini": 128000,
|
|
|
258 |
"mistralai/Mistral-7B-Instruct-v0.3": 32768,
|
259 |
"NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": 32768,
|
260 |
"microsoft/Phi-3.5-mini-instruct": 4096,
|
|
|
261 |
"google/gemma-2-2b-it": 2048,
|
262 |
"openai-community/gpt2": 1024,
|
263 |
"microsoft/phi-2": 2048,
|
|
|
269 |
"Qwen/Qwen2.5-7B-Instruct": 131072,
|
270 |
"tiiuae/falcon-7b-instruct": 8192,
|
271 |
"Qwen/QwQ-32B-preview": 32768,
|
272 |
+
"Qwen/Qwen2.5-VL-7B-Instruct": 64000,
|
273 |
+
"Qwen/qwen2.5-vl-3b-instruct": 64000,
|
274 |
+
"Qwen/qwen2.5-vl-32b-instruct": 8192,
|
275 |
+
"Qwen/qwen2.5-vl-72b-instruct": 131072,
|
276 |
}
|
277 |
|
278 |
# GROQ MODELS - We'll populate this dynamically
|
|
|
292 |
"llama-3.1-70b-specdec": 131072,
|
293 |
"llama-3.2-1b-preview": 131072,
|
294 |
"llama-3.2-3b-preview": 131072,
|
295 |
+
"llama-3.2-11b-vision": 131072,
|
296 |
+
"llama-3.2-90b-vision": 131072,
|
297 |
}
|
298 |
|
299 |
# COHERE MODELS
|
|
|
312 |
"c4ai-aya-expanse-32b": 131072,
|
313 |
}
|
314 |
|
315 |
+
# TOGETHER MODELS
|
316 |
+
TOGETHER_MODELS = {
|
317 |
+
"meta-llama/Llama-3.1-70B-Instruct": 131072,
|
318 |
+
"meta-llama/Llama-3.1-8B-Instruct": 131072,
|
319 |
+
"meta-llama/Llama-3.3-70B-Instruct": 131072,
|
320 |
+
"deepseek-ai/deepseek-r1-distill-llama-70b": 8192,
|
321 |
+
"meta-llama/Llama-3.2-11B-Vision-Instruct": 131072,
|
322 |
+
"meta-llama/Llama-3.2-90B-Vision-Instruct": 131072,
|
323 |
+
}
|
324 |
+
|
325 |
+
# OVH MODELS - OVH AI Endpoints (free beta)
|
326 |
+
OVH_MODELS = {
|
327 |
+
"ovh/codestral-mamba-7b-v0.1": 131072,
|
328 |
+
"ovh/deepseek-r1-distill-llama-70b": 8192,
|
329 |
+
"ovh/llama-3.1-70b-instruct": 131072,
|
330 |
+
"ovh/llama-3.1-8b-instruct": 131072,
|
331 |
+
"ovh/llama-3.3-70b-instruct": 131072,
|
332 |
+
"ovh/llava-next-mistral-7b": 8192,
|
333 |
+
"ovh/mistral-7b-instruct-v0.3": 32768,
|
334 |
+
"ovh/mistral-nemo-2407": 131072,
|
335 |
+
"ovh/mixtral-8x7b-instruct": 32768,
|
336 |
+
"ovh/qwen2.5-coder-32b-instruct": 32768,
|
337 |
+
"ovh/qwen2.5-vl-72b-instruct": 131072,
|
338 |
+
}
|
339 |
+
|
340 |
+
# CEREBRAS MODELS
|
341 |
+
CEREBRAS_MODELS = {
|
342 |
+
"cerebras/llama-3.1-8b": 8192,
|
343 |
+
"cerebras/llama-3.3-70b": 8192,
|
344 |
+
}
|
345 |
+
|
346 |
+
# GOOGLE AI MODELS
|
347 |
+
GOOGLEAI_MODELS = {
|
348 |
+
"gemini-1.0-pro": 32768,
|
349 |
+
"gemini-1.5-flash": 1000000,
|
350 |
+
"gemini-1.5-pro": 1000000,
|
351 |
+
"gemini-2.0-pro": 2000000,
|
352 |
+
"gemini-2.5-pro": 2000000,
|
353 |
}
|
354 |
|
355 |
# ==========================================================
|
|
|
379 |
context_size = 32768
|
380 |
elif "gemma" in model_id:
|
381 |
context_size = 8192
|
382 |
+
elif "vision" in model_id:
|
383 |
+
context_size = 131072
|
384 |
else:
|
385 |
context_size = 8192 # Default assumption
|
386 |
|
|
|
558 |
all_models = list(GROQ_MODELS.keys())
|
559 |
elif provider == "Cohere":
|
560 |
all_models = list(COHERE_MODELS.keys())
|
561 |
+
elif provider == "Together":
|
562 |
+
all_models = list(TOGETHER_MODELS.keys())
|
563 |
+
elif provider == "OVH":
|
564 |
+
all_models = list(OVH_MODELS.keys())
|
565 |
+
elif provider == "Cerebras":
|
566 |
+
all_models = list(CEREBRAS_MODELS.keys())
|
567 |
+
elif provider == "GoogleAI":
|
568 |
+
all_models = list(GOOGLEAI_MODELS.keys())
|
569 |
else:
|
570 |
return [], None
|
571 |
|
|
|
597 |
elif provider == "Cohere":
|
598 |
if model_choice in COHERE_MODELS:
|
599 |
return model_choice, COHERE_MODELS[model_choice]
|
600 |
+
elif provider == "Together":
|
601 |
+
if model_choice in TOGETHER_MODELS:
|
602 |
+
return model_choice, TOGETHER_MODELS[model_choice]
|
603 |
+
elif provider == "OVH":
|
604 |
+
if model_choice in OVH_MODELS:
|
605 |
+
return model_choice, OVH_MODELS[model_choice]
|
606 |
+
elif provider == "Cerebras":
|
607 |
+
if model_choice in CEREBRAS_MODELS:
|
608 |
+
return model_choice, CEREBRAS_MODELS[model_choice]
|
609 |
+
elif provider == "GoogleAI":
|
610 |
+
if model_choice in GOOGLEAI_MODELS:
|
611 |
+
return model_choice, GOOGLEAI_MODELS[model_choice]
|
612 |
|
613 |
return None, 0
|
614 |
|
|
|
617 |
_, ctx_size = get_model_info(provider, model_name)
|
618 |
return f"{ctx_size:,}" if ctx_size else "Unknown"
|
619 |
|
620 |
+
def is_vision_model(provider, model_name):
|
621 |
+
"""Check if a model supports vision/images"""
|
622 |
+
if provider in VISION_MODELS:
|
623 |
+
if model_name in VISION_MODELS[provider]:
|
624 |
+
return True
|
625 |
+
|
626 |
+
# Also check for common vision indicators in model names
|
627 |
+
if any(x in model_name.lower() for x in ["vl", "vision", "visual", "llava", "gemini"]):
|
628 |
+
return True
|
629 |
+
|
630 |
+
return False
|
631 |
+
|
632 |
def update_model_info(provider, model_name):
|
633 |
"""Generate HTML info display for the selected model"""
|
634 |
model_id, ctx_size = get_model_info(provider, model_name)
|
|
|
636 |
return "<p>Model information not available</p>"
|
637 |
|
638 |
# Check if this is a vision model
|
639 |
+
is_vision = is_vision_model(provider, model_name)
|
640 |
|
641 |
+
vision_badge = '<span style="background-color: #4CAF50; color: white; padding: 3px 6px; border-radius: 3px; font-size: 0.8em; margin-left: 5px;">Vision</span>' if is_vision else ''
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
642 |
|
643 |
# For OpenRouter, show the model ID
|
644 |
model_id_html = f"<p><strong>Model ID:</strong> {model_id}</p>" if provider == "OpenRouter" else ""
|
|
|
653 |
{model_id_html}
|
654 |
<p><strong>Context Size:</strong> {ctx_size:,} tokens</p>
|
655 |
<p><strong>Provider:</strong> {provider}</p>
|
656 |
+
{f'<p><strong>Features:</strong> Supports image understanding</p>' if is_vision else ''}
|
657 |
</div>
|
658 |
"""
|
659 |
|
|
|
874 |
logger.error(f"Cohere API error: {str(e)}")
|
875 |
raise e
|
876 |
|
877 |
+
def call_together_api(payload, api_key_override=None):
|
878 |
+
"""Make a call to Together API with error handling"""
|
879 |
try:
|
880 |
if not HAS_OPENAI:
|
881 |
+
raise ImportError("OpenAI package not installed (required for Together API)")
|
882 |
|
883 |
+
api_key = api_key_override if api_key_override else TOGETHER_API_KEY
|
884 |
if not api_key:
|
885 |
+
raise ValueError("Together API key is required")
|
886 |
|
887 |
client = openai.OpenAI(
|
888 |
api_key=api_key,
|
889 |
+
base_url="https://api.together.xyz/v1"
|
890 |
)
|
891 |
|
892 |
# Extract parameters from payload
|
893 |
+
model = payload.get("model", "meta-llama/Llama-3.1-8B-Instruct")
|
|
|
|
|
|
|
|
|
|
|
|
|
894 |
messages = payload.get("messages", [])
|
895 |
temperature = payload.get("temperature", 0.7)
|
896 |
max_tokens = payload.get("max_tokens", 1000)
|
|
|
907 |
|
908 |
return response
|
909 |
except Exception as e:
|
910 |
+
logger.error(f"Together API error: {str(e)}")
|
911 |
+
raise e
|
912 |
+
|
913 |
+
def call_ovh_api(payload, api_key_override=None):
|
914 |
+
"""Make a call to OVH AI Endpoints API with error handling"""
|
915 |
+
try:
|
916 |
+
# Use custom OpenAI client with the OVH endpoint
|
917 |
+
model = payload.get("model", "ovh/llama-3.1-8b-instruct")
|
918 |
+
messages = payload.get("messages", [])
|
919 |
+
temperature = payload.get("temperature", 0.7)
|
920 |
+
max_tokens = payload.get("max_tokens", 1000)
|
921 |
+
|
922 |
+
headers = {
|
923 |
+
"Content-Type": "application/json"
|
924 |
+
}
|
925 |
+
|
926 |
+
data = {
|
927 |
+
"model": model,
|
928 |
+
"messages": messages,
|
929 |
+
"temperature": temperature,
|
930 |
+
"max_tokens": max_tokens
|
931 |
+
}
|
932 |
+
|
933 |
+
response = requests.post(
|
934 |
+
"https://endpoints.ai.cloud.ovh.net/v1/chat/completions",
|
935 |
+
headers=headers,
|
936 |
+
json=data
|
937 |
+
)
|
938 |
+
|
939 |
+
if response.status_code != 200:
|
940 |
+
raise ValueError(f"OVH API returned status code {response.status_code}: {response.text}")
|
941 |
+
|
942 |
+
return response.json()
|
943 |
+
except Exception as e:
|
944 |
+
logger.error(f"OVH API error: {str(e)}")
|
945 |
+
raise e
|
946 |
+
|
947 |
+
def call_cerebras_api(payload, api_key_override=None):
|
948 |
+
"""Make a call to Cerebras API with error handling"""
|
949 |
+
try:
|
950 |
+
# Use vanilla requests for this API
|
951 |
+
model = payload.get("model", "cerebras/llama-3.1-8b")
|
952 |
+
messages = payload.get("messages", [])
|
953 |
+
temperature = payload.get("temperature", 0.7)
|
954 |
+
max_tokens = payload.get("max_tokens", 1000)
|
955 |
+
|
956 |
+
headers = {
|
957 |
+
"Content-Type": "application/json"
|
958 |
+
}
|
959 |
+
|
960 |
+
data = {
|
961 |
+
"model": model,
|
962 |
+
"messages": messages,
|
963 |
+
"temperature": temperature,
|
964 |
+
"max_tokens": max_tokens
|
965 |
+
}
|
966 |
+
|
967 |
+
response = requests.post(
|
968 |
+
"https://api.cloud.cerebras.ai/v1/chat/completions",
|
969 |
+
headers=headers,
|
970 |
+
json=data
|
971 |
+
)
|
972 |
+
|
973 |
+
if response.status_code != 200:
|
974 |
+
raise ValueError(f"Cerebras API returned status code {response.status_code}: {response.text}")
|
975 |
+
|
976 |
+
return response.json()
|
977 |
+
except Exception as e:
|
978 |
+
logger.error(f"Cerebras API error: {str(e)}")
|
979 |
+
raise e
|
980 |
+
|
981 |
+
def call_googleai_api(payload, api_key_override=None):
|
982 |
+
"""Make a call to Google AI (Gemini) API with error handling"""
|
983 |
+
try:
|
984 |
+
from google.generativeai import configure, GenerativeModel
|
985 |
+
|
986 |
+
api_key = api_key_override if api_key_override else GOOGLEAI_API_KEY
|
987 |
+
if not api_key:
|
988 |
+
raise ValueError("Google AI API key is required")
|
989 |
+
|
990 |
+
configure(api_key=api_key)
|
991 |
+
|
992 |
+
# Extract parameters from payload
|
993 |
+
model_name = payload.get("model", "gemini-1.5-pro")
|
994 |
+
messages = payload.get("messages", [])
|
995 |
+
temperature = payload.get("temperature", 0.7)
|
996 |
+
|
997 |
+
# Convert messages to Google AI format
|
998 |
+
google_messages = []
|
999 |
+
for msg in messages:
|
1000 |
+
role = msg["role"]
|
1001 |
+
content = msg["content"]
|
1002 |
+
|
1003 |
+
# Skip system messages for now (Gemini doesn't support them directly)
|
1004 |
+
if role == "system":
|
1005 |
+
continue
|
1006 |
+
|
1007 |
+
# Map user/assistant roles to Google's roles
|
1008 |
+
gemini_role = "user" if role == "user" else "model"
|
1009 |
+
|
1010 |
+
# Process content (text or multimodal)
|
1011 |
+
if isinstance(content, list):
|
1012 |
+
# Multimodal content handling for Gemini
|
1013 |
+
parts = []
|
1014 |
+
for item in content:
|
1015 |
+
if item["type"] == "text":
|
1016 |
+
parts.append({"text": item["text"]})
|
1017 |
+
elif item["type"] == "image_url":
|
1018 |
+
image_data = item["image_url"]["url"]
|
1019 |
+
if image_data.startswith("data:"):
|
1020 |
+
# Extract base64 data
|
1021 |
+
mime, base64_data = image_data.split(";base64,")
|
1022 |
+
mime_type = mime.split(":")[1]
|
1023 |
+
parts.append({
|
1024 |
+
"inline_data": {
|
1025 |
+
"mime_type": mime_type,
|
1026 |
+
"data": base64_data
|
1027 |
+
}
|
1028 |
+
})
|
1029 |
+
google_messages.append({"role": gemini_role, "parts": parts})
|
1030 |
+
else:
|
1031 |
+
# Simple text content
|
1032 |
+
google_messages.append({"role": gemini_role, "parts": [{"text": content}]})
|
1033 |
+
|
1034 |
+
# Create Gemini model
|
1035 |
+
model = GenerativeModel(model_name)
|
1036 |
+
|
1037 |
+
# Generate content
|
1038 |
+
response = model.generate_content(
|
1039 |
+
google_messages,
|
1040 |
+
generation_config={
|
1041 |
+
"temperature": temperature,
|
1042 |
+
"max_output_tokens": payload.get("max_tokens", 1000),
|
1043 |
+
"top_p": payload.get("top_p", 0.95),
|
1044 |
+
}
|
1045 |
+
)
|
1046 |
+
|
1047 |
+
# Convert response to standard format
|
1048 |
+
return {
|
1049 |
+
"choices": [
|
1050 |
+
{
|
1051 |
+
"message": {
|
1052 |
+
"role": "assistant",
|
1053 |
+
"content": response.text
|
1054 |
+
}
|
1055 |
+
}
|
1056 |
+
]
|
1057 |
+
}
|
1058 |
+
except Exception as e:
|
1059 |
+
logger.error(f"Google AI API error: {str(e)}")
|
1060 |
raise e
|
1061 |
|
1062 |
def extract_ai_response(result, provider):
|
|
|
1095 |
if hasattr(result, "text"):
|
1096 |
return result.text
|
1097 |
|
1098 |
+
elif provider == "Together":
|
1099 |
if hasattr(result, "choices") and len(result.choices) > 0:
|
1100 |
return result.choices[0].message.content
|
1101 |
+
|
1102 |
+
elif provider == "OVH":
|
1103 |
+
if isinstance(result, dict) and "choices" in result and len(result["choices"]) > 0:
|
1104 |
+
return result["choices"][0]["message"]["content"]
|
1105 |
+
|
1106 |
+
elif provider == "Cerebras":
|
1107 |
+
if isinstance(result, dict) and "choices" in result and len(result["choices"]) > 0:
|
1108 |
+
return result["choices"][0]["message"]["content"]
|
1109 |
+
|
1110 |
+
elif provider == "GoogleAI":
|
1111 |
+
if isinstance(result, dict) and "choices" in result and len(result["choices"]) > 0:
|
1112 |
+
return result["choices"][0]["message"]["content"]
|
1113 |
|
1114 |
logger.error(f"Unexpected response structure from {provider}: {result}")
|
1115 |
return f"Error: Could not extract response from {provider} API result"
|
|
|
1196 |
chatbot[-1][1] += f"\n\nError during streaming: {str(e)}"
|
1197 |
yield chatbot
|
1198 |
|
1199 |
+
def together_streaming_handler(response, chatbot, message_idx, message):
|
1200 |
try:
|
1201 |
# First add the user message if needed
|
1202 |
if len(chatbot) == message_idx:
|
|
|
1211 |
yield chatbot
|
1212 |
|
1213 |
except Exception as e:
|
1214 |
+
logger.error(f"Error in Together streaming handler: {str(e)}")
|
1215 |
# Add error message to the current response
|
1216 |
chatbot[-1][1] += f"\n\nError during streaming: {str(e)}"
|
1217 |
yield chatbot
|
|
|
1506 |
chat_history.append([message, error_message])
|
1507 |
return chat_history
|
1508 |
|
1509 |
+
elif provider == "Together":
|
1510 |
# Get model ID from registry
|
1511 |
model_id, _ = get_model_info(provider, model_choice)
|
1512 |
if not model_id:
|
1513 |
+
error_message = f"Error: Model '{model_choice}' not found in Together"
|
1514 |
chat_history.append([message, error_message])
|
1515 |
return chat_history
|
1516 |
|
1517 |
+
# Build Together payload
|
1518 |
payload = {
|
1519 |
+
"model": model_id,
|
1520 |
"messages": messages,
|
1521 |
"temperature": temperature,
|
1522 |
"max_tokens": max_tokens,
|
1523 |
"stream": stream_output
|
1524 |
}
|
1525 |
|
1526 |
+
# Call Together API
|
1527 |
+
logger.info(f"Sending request to Together model: {model_id}")
|
1528 |
|
1529 |
try:
|
1530 |
+
response = call_together_api(payload, api_key_override)
|
1531 |
|
1532 |
# Handle streaming response
|
1533 |
if stream_output:
|
|
|
1536 |
|
1537 |
# Set up generator for streaming updates
|
1538 |
def streaming_generator():
|
1539 |
+
for updated_history in together_streaming_handler(response, chat_history, len(chat_history) - 1, message):
|
1540 |
yield updated_history
|
1541 |
|
1542 |
return streaming_generator()
|
|
|
1547 |
chat_history.append([message, ai_response])
|
1548 |
return chat_history
|
1549 |
except Exception as e:
|
1550 |
+
error_message = f"Together API Error: {str(e)}"
|
1551 |
+
logger.error(error_message)
|
1552 |
+
chat_history.append([message, error_message])
|
1553 |
+
return chat_history
|
1554 |
+
|
1555 |
+
elif provider == "OVH":
|
1556 |
+
# Get model ID from registry
|
1557 |
+
model_id, _ = get_model_info(provider, model_choice)
|
1558 |
+
if not model_id:
|
1559 |
+
error_message = f"Error: Model '{model_choice}' not found in OVH"
|
1560 |
+
chat_history.append([message, error_message])
|
1561 |
+
return chat_history
|
1562 |
+
|
1563 |
+
# Build OVH payload
|
1564 |
+
payload = {
|
1565 |
+
"model": model_id,
|
1566 |
+
"messages": messages,
|
1567 |
+
"temperature": temperature,
|
1568 |
+
"max_tokens": max_tokens
|
1569 |
+
}
|
1570 |
+
|
1571 |
+
# Call OVH API
|
1572 |
+
logger.info(f"Sending request to OVH model: {model_id}")
|
1573 |
+
|
1574 |
+
try:
|
1575 |
+
response = call_ovh_api(payload)
|
1576 |
+
|
1577 |
+
# Extract response
|
1578 |
+
ai_response = extract_ai_response(response, provider)
|
1579 |
+
chat_history.append([message, ai_response])
|
1580 |
+
return chat_history
|
1581 |
+
except Exception as e:
|
1582 |
+
error_message = f"OVH API Error: {str(e)}"
|
1583 |
+
logger.error(error_message)
|
1584 |
+
chat_history.append([message, error_message])
|
1585 |
+
return chat_history
|
1586 |
+
|
1587 |
+
elif provider == "Cerebras":
|
1588 |
+
# Get model ID from registry
|
1589 |
+
model_id, _ = get_model_info(provider, model_choice)
|
1590 |
+
if not model_id:
|
1591 |
+
error_message = f"Error: Model '{model_choice}' not found in Cerebras"
|
1592 |
+
chat_history.append([message, error_message])
|
1593 |
+
return chat_history
|
1594 |
+
|
1595 |
+
# Build Cerebras payload
|
1596 |
+
payload = {
|
1597 |
+
"model": model_id,
|
1598 |
+
"messages": messages,
|
1599 |
+
"temperature": temperature,
|
1600 |
+
"max_tokens": max_tokens
|
1601 |
+
}
|
1602 |
+
|
1603 |
+
# Call Cerebras API
|
1604 |
+
logger.info(f"Sending request to Cerebras model: {model_id}")
|
1605 |
+
|
1606 |
+
try:
|
1607 |
+
response = call_cerebras_api(payload)
|
1608 |
+
|
1609 |
+
# Extract response
|
1610 |
+
ai_response = extract_ai_response(response, provider)
|
1611 |
+
chat_history.append([message, ai_response])
|
1612 |
+
return chat_history
|
1613 |
+
except Exception as e:
|
1614 |
+
error_message = f"Cerebras API Error: {str(e)}"
|
1615 |
+
logger.error(error_message)
|
1616 |
+
chat_history.append([message, error_message])
|
1617 |
+
return chat_history
|
1618 |
+
|
1619 |
+
elif provider == "GoogleAI":
|
1620 |
+
# Get model ID from registry
|
1621 |
+
model_id, _ = get_model_info(provider, model_choice)
|
1622 |
+
if not model_id:
|
1623 |
+
error_message = f"Error: Model '{model_choice}' not found in GoogleAI"
|
1624 |
+
chat_history.append([message, error_message])
|
1625 |
+
return chat_history
|
1626 |
+
|
1627 |
+
# Build GoogleAI payload
|
1628 |
+
payload = {
|
1629 |
+
"model": model_id,
|
1630 |
+
"messages": messages,
|
1631 |
+
"temperature": temperature,
|
1632 |
+
"max_tokens": max_tokens,
|
1633 |
+
"top_p": top_p
|
1634 |
+
}
|
1635 |
+
|
1636 |
+
# Call GoogleAI API
|
1637 |
+
logger.info(f"Sending request to GoogleAI model: {model_id}")
|
1638 |
+
|
1639 |
+
try:
|
1640 |
+
response = call_googleai_api(payload, api_key_override)
|
1641 |
+
|
1642 |
+
# Extract response
|
1643 |
+
ai_response = extract_ai_response(response, provider)
|
1644 |
+
chat_history.append([message, ai_response])
|
1645 |
+
return chat_history
|
1646 |
+
except Exception as e:
|
1647 |
+
error_message = f"GoogleAI API Error: {str(e)}"
|
1648 |
logger.error(error_message)
|
1649 |
chat_history.append([message, error_message])
|
1650 |
return chat_history
|
|
|
1707 |
gr.Markdown("""
|
1708 |
# 🤖 Multi-Provider CrispChat
|
1709 |
|
1710 |
+
Chat with AI models from multiple providers: OpenRouter, OpenAI, HuggingFace, Groq, Cohere, Together, OVH, Cerebras, and Google AI.
|
1711 |
""")
|
1712 |
|
1713 |
with gr.Row():
|
|
|
1718 |
show_copy_button=True,
|
1719 |
show_label=False,
|
1720 |
avatar_images=(None, "https://upload.wikimedia.org/wikipedia/commons/0/04/ChatGPT_logo.svg"),
|
|
|
1721 |
elem_id="chat-window"
|
1722 |
)
|
1723 |
|
|
|
1737 |
with gr.Column(scale=1):
|
1738 |
clear_btn = gr.Button("Clear Chat", variant="secondary")
|
1739 |
|
1740 |
+
# Container for conditionally showing image upload
|
1741 |
+
with gr.Row(visible=True) as image_upload_container:
|
1742 |
# Image upload
|
1743 |
with gr.Accordion("Upload Images (for vision models)", open=False):
|
1744 |
images = gr.File(
|
|
|
1767 |
|
1768 |
# Provider selection
|
1769 |
provider_choice = gr.Radio(
|
1770 |
+
choices=["OpenRouter", "OpenAI", "HuggingFace", "Groq", "Cohere", "Together", "OVH", "Cerebras", "GoogleAI"],
|
1771 |
value="OpenRouter",
|
1772 |
label="AI Provider"
|
1773 |
)
|
|
|
1830 |
visible=False
|
1831 |
)
|
1832 |
|
1833 |
+
together_model = gr.Dropdown(
|
1834 |
+
choices=list(TOGETHER_MODELS.keys()),
|
1835 |
+
value="meta-llama/Llama-3.1-8B-Instruct" if "meta-llama/Llama-3.1-8B-Instruct" in TOGETHER_MODELS else None,
|
1836 |
+
label="Together Model",
|
1837 |
+
elem_id="together-model-choice",
|
1838 |
+
visible=False
|
1839 |
+
)
|
1840 |
+
|
1841 |
+
ovh_model = gr.Dropdown(
|
1842 |
+
choices=list(OVH_MODELS.keys()),
|
1843 |
+
value="ovh/llama-3.1-8b-instruct" if "ovh/llama-3.1-8b-instruct" in OVH_MODELS else None,
|
1844 |
+
label="OVH Model",
|
1845 |
+
elem_id="ovh-model-choice",
|
1846 |
+
visible=False
|
1847 |
+
)
|
1848 |
+
|
1849 |
+
cerebras_model = gr.Dropdown(
|
1850 |
+
choices=list(CEREBRAS_MODELS.keys()),
|
1851 |
+
value="cerebras/llama-3.1-8b" if "cerebras/llama-3.1-8b" in CEREBRAS_MODELS else None,
|
1852 |
+
label="Cerebras Model",
|
1853 |
+
elem_id="cerebras-model-choice",
|
1854 |
+
visible=False
|
1855 |
+
)
|
1856 |
+
|
1857 |
+
googleai_model = gr.Dropdown(
|
1858 |
+
choices=list(GOOGLEAI_MODELS.keys()),
|
1859 |
+
value="gemini-1.5-pro" if "gemini-1.5-pro" in GOOGLEAI_MODELS else None,
|
1860 |
+
label="Google AI Model",
|
1861 |
+
elem_id="googleai-model-choice",
|
1862 |
visible=False
|
1863 |
)
|
1864 |
|
|
|
2000 |
model_info_display = gr.HTML(
|
2001 |
value=update_model_info("OpenRouter", OPENROUTER_ALL_MODELS[0][0])
|
2002 |
)
|
2003 |
+
|
2004 |
+
is_vision_indicator = gr.Checkbox(
|
2005 |
+
label="Supports Images",
|
2006 |
+
value=is_vision_model("OpenRouter", OPENROUTER_ALL_MODELS[0][0]),
|
2007 |
+
interactive=False
|
2008 |
+
)
|
2009 |
|
2010 |
# Add usage instructions
|
2011 |
with gr.Accordion("Usage Instructions", open=False):
|
|
|
2025 |
- **HuggingFace**: Direct access to open models, some models require API key
|
2026 |
- **Groq**: High-performance inference, requires API key
|
2027 |
- **Cohere**: Specialized in language understanding, requires API key
|
2028 |
+
- **Together**: Access to high-quality open models, requires API key
|
2029 |
+
- **OVH**: Free beta access to several models
|
2030 |
+
- **Cerebras**: Free tier available with 8K context limit
|
2031 |
+
- **GoogleAI**: Google's Gemini models, requires API key
|
2032 |
|
2033 |
## Advanced Parameters
|
2034 |
- **Temperature**: Controls randomness (higher = more creative, lower = more deterministic)
|
|
|
2040 |
# Add a footer with version info
|
2041 |
footer_md = gr.Markdown("""
|
2042 |
---
|
2043 |
+
### Multi-Provider CrispChat v1.1
|
2044 |
Built with ❤️ using Gradio and multiple AI provider APIs | Context sizes shown next to model names
|
2045 |
""")
|
2046 |
|
2047 |
# Define event handlers
|
2048 |
def toggle_model_dropdowns(provider):
|
2049 |
"""Show/hide model dropdowns based on provider selection"""
|
2050 |
+
return {
|
2051 |
+
openrouter_model: gr.update(visible=(provider == "OpenRouter")),
|
2052 |
+
openai_model: gr.update(visible=(provider == "OpenAI")),
|
2053 |
+
hf_model: gr.update(visible=(provider == "HuggingFace")),
|
2054 |
+
groq_model: gr.update(visible=(provider == "Groq")),
|
2055 |
+
cohere_model: gr.update(visible=(provider == "Cohere")),
|
2056 |
+
together_model: gr.update(visible=(provider == "Together")),
|
2057 |
+
ovh_model: gr.update(visible=(provider == "OVH")),
|
2058 |
+
cerebras_model: gr.update(visible=(provider == "Cerebras")),
|
2059 |
+
googleai_model: gr.update(visible=(provider == "GoogleAI"))
|
2060 |
+
}
|
2061 |
|
2062 |
+
def update_context_for_provider(provider, openrouter_model, openai_model, hf_model, groq_model, cohere_model, together_model, ovh_model, cerebras_model, googleai_model):
|
2063 |
"""Update context display based on selected provider and model"""
|
2064 |
if provider == "OpenRouter":
|
2065 |
return update_context_display(provider, openrouter_model)
|
|
|
2071 |
return update_context_display(provider, groq_model)
|
2072 |
elif provider == "Cohere":
|
2073 |
return update_context_display(provider, cohere_model)
|
2074 |
+
elif provider == "Together":
|
2075 |
+
return update_context_display(provider, together_model)
|
2076 |
+
elif provider == "OVH":
|
2077 |
+
return update_context_display(provider, ovh_model)
|
2078 |
+
elif provider == "Cerebras":
|
2079 |
+
return update_context_display(provider, cerebras_model)
|
2080 |
+
elif provider == "GoogleAI":
|
2081 |
+
return update_context_display(provider, googleai_model)
|
2082 |
return "Unknown"
|
2083 |
|
2084 |
+
def update_model_info_for_provider(provider, openrouter_model, openai_model, hf_model, groq_model, cohere_model, together_model, ovh_model, cerebras_model, googleai_model):
|
2085 |
"""Update model info based on selected provider and model"""
|
2086 |
if provider == "OpenRouter":
|
2087 |
return update_model_info(provider, openrouter_model)
|
|
|
2093 |
return update_model_info(provider, groq_model)
|
2094 |
elif provider == "Cohere":
|
2095 |
return update_model_info(provider, cohere_model)
|
2096 |
+
elif provider == "Together":
|
2097 |
+
return update_model_info(provider, together_model)
|
2098 |
+
elif provider == "OVH":
|
2099 |
+
return update_model_info(provider, ovh_model)
|
2100 |
+
elif provider == "Cerebras":
|
2101 |
+
return update_model_info(provider, cerebras_model)
|
2102 |
+
elif provider == "GoogleAI":
|
2103 |
+
return update_model_info(provider, googleai_model)
|
2104 |
return "<p>Model information not available</p>"
|
2105 |
+
|
2106 |
+
def update_vision_indicator(provider, model_choice):
|
2107 |
+
"""Update the vision capability indicator"""
|
2108 |
+
return is_vision_model(provider, model_choice)
|
2109 |
+
|
2110 |
+
def update_image_upload_visibility(provider, model_choice):
|
2111 |
+
"""Show/hide image upload based on model vision capabilities"""
|
2112 |
+
is_vision = is_vision_model(provider, model_choice)
|
2113 |
+
return gr.update(visible=is_vision)
|
2114 |
|
2115 |
+
# Search model function - FIXED FUNCTION
|
2116 |
+
def search_openrouter_models(search_term):
|
2117 |
+
"""Filter OpenRouter models based on search term"""
|
2118 |
+
all_models = [model[0] for model in OPENROUTER_ALL_MODELS]
|
2119 |
+
if not search_term:
|
2120 |
+
return gr.update(choices=all_models, value=all_models[0] if all_models else None)
|
2121 |
+
|
2122 |
+
filtered_models = [model for model in all_models if search_term.lower() in model.lower()]
|
2123 |
|
2124 |
+
if filtered_models:
|
2125 |
+
return gr.update(choices=filtered_models, value=filtered_models[0])
|
2126 |
+
else:
|
2127 |
+
return gr.update(choices=all_models, value=all_models[0] if all_models else None)
|
|
|
|
|
2128 |
|
2129 |
+
def search_openai_models(search_term):
|
2130 |
+
"""Filter OpenAI models based on search term"""
|
2131 |
+
all_models = list(OPENAI_MODELS.keys())
|
2132 |
+
if not search_term:
|
2133 |
+
return gr.update(choices=all_models, value="gpt-3.5-turbo" if "gpt-3.5-turbo" in all_models else all_models[0] if all_models else None)
|
2134 |
|
2135 |
+
filtered_models = [model for model in all_models if search_term.lower() in model.lower()]
|
2136 |
+
|
2137 |
+
if filtered_models:
|
2138 |
+
return gr.update(choices=filtered_models, value=filtered_models[0])
|
2139 |
+
else:
|
2140 |
+
return gr.update(choices=all_models, value="gpt-3.5-turbo" if "gpt-3.5-turbo" in all_models else all_models[0] if all_models else None)
|
2141 |
|
2142 |
+
def search_hf_models(search_term):
|
2143 |
+
"""Filter HuggingFace models based on search term"""
|
2144 |
+
all_models = list(HUGGINGFACE_MODELS.keys())
|
2145 |
+
if not search_term:
|
2146 |
+
default_model = "mistralai/Mistral-7B-Instruct-v0.3" if "mistralai/Mistral-7B-Instruct-v0.3" in all_models else all_models[0] if all_models else None
|
2147 |
+
return gr.update(choices=all_models, value=default_model)
|
2148 |
|
2149 |
+
filtered_models = [model for model in all_models if search_term.lower() in model.lower()]
|
2150 |
+
|
2151 |
+
if filtered_models:
|
2152 |
+
return gr.update(choices=filtered_models, value=filtered_models[0])
|
2153 |
+
else:
|
2154 |
+
default_model = "mistralai/Mistral-7B-Instruct-v0.3" if "mistralai/Mistral-7B-Instruct-v0.3" in all_models else all_models[0] if all_models else None
|
2155 |
+
return gr.update(choices=all_models, value=default_model)
|
2156 |
|
2157 |
+
def search_groq_models(search_term):
|
2158 |
+
"""Filter Groq models based on search term"""
|
2159 |
+
all_models = list(GROQ_MODELS.keys())
|
2160 |
+
if not search_term:
|
2161 |
+
default_model = "llama-3.1-8b-instant" if "llama-3.1-8b-instant" in all_models else all_models[0] if all_models else None
|
2162 |
+
return gr.update(choices=all_models, value=default_model)
|
2163 |
|
2164 |
+
filtered_models = [model for model in all_models if search_term.lower() in model.lower()]
|
2165 |
+
|
2166 |
+
if filtered_models:
|
2167 |
+
return gr.update(choices=filtered_models, value=filtered_models[0])
|
2168 |
+
else:
|
2169 |
+
default_model = "llama-3.1-8b-instant" if "llama-3.1-8b-instant" in all_models else all_models[0] if all_models else None
|
2170 |
+
return gr.update(choices=all_models, value=default_model)
|
2171 |
|
2172 |
+
def search_cohere_models(search_term):
|
2173 |
+
"""Filter Cohere models based on search term"""
|
2174 |
+
all_models = list(COHERE_MODELS.keys())
|
2175 |
+
if not search_term:
|
2176 |
+
default_model = "command-r-plus" if "command-r-plus" in all_models else all_models[0] if all_models else None
|
2177 |
+
return gr.update(choices=all_models, value=default_model)
|
2178 |
|
2179 |
+
filtered_models = [model for model in all_models if search_term.lower() in model.lower()]
|
2180 |
+
|
2181 |
+
if filtered_models:
|
2182 |
+
return gr.update(choices=filtered_models, value=filtered_models[0])
|
2183 |
+
else:
|
2184 |
+
default_model = "command-r-plus" if "command-r-plus" in all_models else all_models[0] if all_models else None
|
2185 |
+
return gr.update(choices=all_models, value=default_model)
|
2186 |
|
2187 |
+
def search_together_models(search_term):
|
2188 |
+
"""Filter Together models based on search term"""
|
2189 |
+
all_models = list(TOGETHER_MODELS.keys())
|
2190 |
+
if not search_term:
|
2191 |
+
default_model = "meta-llama/Llama-3.1-8B-Instruct" if "meta-llama/Llama-3.1-8B-Instruct" in all_models else all_models[0] if all_models else None
|
2192 |
+
return gr.update(choices=all_models, value=default_model)
|
2193 |
|
2194 |
+
filtered_models = [model for model in all_models if search_term.lower() in model.lower()]
|
2195 |
+
|
2196 |
+
if filtered_models:
|
2197 |
+
return gr.update(choices=filtered_models, value=filtered_models[0])
|
2198 |
+
else:
|
2199 |
+
default_model = "meta-llama/Llama-3.1-8B-Instruct" if "meta-llama/Llama-3.1-8B-Instruct" in all_models else all_models[0] if all_models else None
|
2200 |
+
return gr.update(choices=all_models, value=default_model)
|
2201 |
|
2202 |
+
def search_ovh_models(search_term):
|
2203 |
+
"""Filter OVH models based on search term"""
|
2204 |
+
all_models = list(OVH_MODELS.keys())
|
2205 |
+
if not search_term:
|
2206 |
+
default_model = "ovh/llama-3.1-8b-instruct" if "ovh/llama-3.1-8b-instruct" in all_models else all_models[0] if all_models else None
|
2207 |
+
return gr.update(choices=all_models, value=default_model)
|
2208 |
|
2209 |
+
filtered_models = [model for model in all_models if search_term.lower() in model.lower()]
|
2210 |
+
|
2211 |
+
if filtered_models:
|
2212 |
+
return gr.update(choices=filtered_models, value=filtered_models[0])
|
2213 |
+
else:
|
2214 |
+
default_model = "ovh/llama-3.1-8b-instruct" if "ovh/llama-3.1-8b-instruct" in all_models else all_models[0] if all_models else None
|
2215 |
+
return gr.update(choices=all_models, value=default_model)
|
2216 |
+
|
2217 |
+
def search_cerebras_models(search_term):
|
2218 |
+
"""Filter Cerebras models based on search term"""
|
2219 |
+
all_models = list(CEREBRAS_MODELS.keys())
|
2220 |
+
if not search_term:
|
2221 |
+
default_model = "cerebras/llama-3.1-8b" if "cerebras/llama-3.1-8b" in all_models else all_models[0] if all_models else None
|
2222 |
+
return gr.update(choices=all_models, value=default_model)
|
2223 |
+
|
2224 |
+
filtered_models = [model for model in all_models if search_term.lower() in model.lower()]
|
2225 |
+
|
2226 |
+
if filtered_models:
|
2227 |
+
return gr.update(choices=filtered_models, value=filtered_models[0])
|
2228 |
+
else:
|
2229 |
+
default_model = "cerebras/llama-3.1-8b" if "cerebras/llama-3.1-8b" in all_models else all_models[0] if all_models else None
|
2230 |
+
return gr.update(choices=all_models, value=default_model)
|
2231 |
+
|
2232 |
+
def search_googleai_models(search_term):
|
2233 |
+
"""Filter GoogleAI models based on search term"""
|
2234 |
+
all_models = list(GOOGLEAI_MODELS.keys())
|
2235 |
+
if not search_term:
|
2236 |
+
default_model = "gemini-1.5-pro" if "gemini-1.5-pro" in all_models else all_models[0] if all_models else None
|
2237 |
+
return gr.update(choices=all_models, value=default_model)
|
2238 |
+
|
2239 |
+
filtered_models = [model for model in all_models if search_term.lower() in model.lower()]
|
2240 |
+
|
2241 |
+
if filtered_models:
|
2242 |
+
return gr.update(choices=filtered_models, value=filtered_models[0])
|
2243 |
+
else:
|
2244 |
+
default_model = "gemini-1.5-pro" if "gemini-1.5-pro" in all_models else all_models[0] if all_models else None
|
2245 |
+
return gr.update(choices=all_models, value=default_model)
|
2246 |
+
|
2247 |
|
2248 |
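# Editor's note (sketch): the eight search helpers above differ only in the
# model table and the preferred default, so they could be generated from one
# factory. `make_search_fn` is an illustrative refactor, not the original code.
def make_search_fn(models_dict, preferred_default):
    """Build a dropdown-filter callback for one provider's model table."""
    def search(search_term):
        all_models = list(models_dict.keys())
        default = preferred_default if preferred_default in all_models else (all_models[0] if all_models else None)
        if not search_term:
            return gr.update(choices=all_models, value=default)
        filtered = [m for m in all_models if search_term.lower() in m.lower()]
        if filtered:
            return gr.update(choices=filtered, value=filtered[0])
        # No match: fall back to the full list, mirroring the handwritten versions
        return gr.update(choices=all_models, value=default)
    return search

# e.g. search_cohere_models = make_search_fn(COHERE_MODELS, "command-r-plus")
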
def refresh_groq_models_list():
    """Refresh the list of Groq models"""
    global GROQ_MODELS
    GROQ_MODELS = fetch_groq_models()
    return gr.update(choices=list(GROQ_MODELS.keys()))

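# Sketch (assumption): a refresh control elsewhere in the UI would wire this
# callback to the Groq dropdown; `groq_refresh_btn` is a hypothetical name.
# groq_refresh_btn.click(fn=refresh_groq_models_list, outputs=groq_model)
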
def get_current_model(provider, openrouter_model, openai_model, hf_model, groq_model, cohere_model, together_model, ovh_model, cerebras_model, googleai_model):
    """Get the currently selected model based on provider"""
    if provider == "OpenRouter":
        return openrouter_model
    elif provider == "OpenAI":
        return openai_model
    elif provider == "HuggingFace":
        return hf_model
    elif provider == "Groq":
        return groq_model
    elif provider == "Cohere":
        return cohere_model
    elif provider == "Together":
        return together_model
    elif provider == "OVH":
        return ovh_model
    elif provider == "Cerebras":
        return cerebras_model
    elif provider == "GoogleAI":
        return googleai_model
    return None

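# Sketch: the elif chain above could equally be a small mapping; illustrative
# only, not the original implementation.
# def get_current_model(provider, *models):
#     order = ("OpenRouter", "OpenAI", "HuggingFace", "Groq", "Cohere",
#              "Together", "OVH", "Cerebras", "GoogleAI")
#     return dict(zip(order, models)).get(provider)
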
# Process uploaded images
...

provider_choice.change(
    fn=toggle_model_dropdowns,
    inputs=provider_choice,
    # Gradio accepts a component, a list, or a set here, not a dict mapping
    # components to None. A set is used on the assumption that
    # toggle_model_dropdowns returns a {component: update} dict.
    outputs={
        openrouter_model, openai_model, hf_model, groq_model, cohere_model,
        together_model, ovh_model, cerebras_model, googleai_model
    }
).then(
    fn=update_context_for_provider,
    inputs=[provider_choice, openrouter_model, openai_model, hf_model, groq_model, cohere_model, together_model, ovh_model, cerebras_model, googleai_model],
    outputs=context_display
).then(
    fn=update_model_info_for_provider,
    inputs=[provider_choice, openrouter_model, openai_model, hf_model, groq_model, cohere_model, together_model, ovh_model, cerebras_model, googleai_model],
    outputs=model_info_display
).then(
    # Pass every dropdown value, not only the OpenRouter one, so
    # get_current_model resolves correctly whichever provider is active.
    fn=lambda provider, *models: update_vision_indicator(provider, get_current_model(provider, *models)),
    inputs=[provider_choice, openrouter_model, openai_model, hf_model, groq_model, cohere_model, together_model, ovh_model, cerebras_model, googleai_model],
    outputs=is_vision_indicator
).then(
    fn=lambda provider, *models: update_image_upload_visibility(provider, get_current_model(provider, *models)),
    inputs=[provider_choice, openrouter_model, openai_model, hf_model, groq_model, cohere_model, together_model, ovh_model, cerebras_model, googleai_model],
    outputs=image_upload_container
)
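# For reference, a plausible shape for toggle_model_dropdowns (its body is not
# part of this hunk, so this is an assumed sketch, not the original code):
# def toggle_model_dropdowns(provider):
#     dropdowns = {"OpenRouter": openrouter_model, "OpenAI": openai_model,
#                  "HuggingFace": hf_model, "Groq": groq_model, "Cohere": cohere_model,
#                  "Together": together_model, "OVH": ovh_model,
#                  "Cerebras": cerebras_model, "GoogleAI": googleai_model}
#     return {dd: gr.update(visible=(name == provider)) for name, dd in dropdowns.items()}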

# Set up model search - FIXED VERSION: the handler must return one update per
# dropdown listed in `outputs`, so non-active providers get a no-op gr.update().
def dispatch_model_search(provider, search):
    searchers = {
        "OpenRouter": search_openrouter_models,
        "OpenAI": search_openai_models,
        "HuggingFace": search_hf_models,
        "Groq": search_groq_models,
        "Cohere": search_cohere_models,
        "Together": search_together_models,
        "OVH": search_ovh_models,
        "Cerebras": search_cerebras_models,
        "GoogleAI": search_googleai_models,
    }
    order = ("OpenRouter", "OpenAI", "HuggingFace", "Groq", "Cohere",
             "Together", "OVH", "Cerebras", "GoogleAI")
    return tuple(searchers[p](search) if p == provider else gr.update() for p in order)

model_search.change(
    fn=dispatch_model_search,
    inputs=[provider_choice, model_search],
    outputs=[
        openrouter_model, openai_model, hf_model, groq_model,
        cohere_model, together_model, ovh_model, cerebras_model, googleai_model
    ]
)

# Set up model change events to update context display and model info
openrouter_model.change(
    fn=lambda model: update_context_display("OpenRouter", model),
    inputs=openrouter_model,
    outputs=context_display
).then(
    fn=lambda model: update_model_info("OpenRouter", model),
    inputs=openrouter_model,
    outputs=model_info_display
).then(
    fn=lambda model: update_vision_indicator("OpenRouter", model),
    inputs=openrouter_model,
    outputs=is_vision_indicator
).then(
    fn=lambda model: update_image_upload_visibility("OpenRouter", model),
    inputs=openrouter_model,
    outputs=image_upload_container
)

openai_model.change(
    fn=lambda model: update_context_display("OpenAI", model),
    inputs=openai_model,
    outputs=context_display
).then(
    fn=lambda model: update_model_info("OpenAI", model),
    inputs=openai_model,
    outputs=model_info_display
).then(
    fn=lambda model: update_vision_indicator("OpenAI", model),
    inputs=openai_model,
    outputs=is_vision_indicator
).then(
    fn=lambda model: update_image_upload_visibility("OpenAI", model),
    inputs=openai_model,
    outputs=image_upload_container
)

hf_model.change(
    fn=lambda model: update_context_display("HuggingFace", model),
    inputs=hf_model,
    outputs=context_display
).then(
    fn=lambda model: update_model_info("HuggingFace", model),
    inputs=hf_model,
    outputs=model_info_display
).then(
    fn=lambda model: update_vision_indicator("HuggingFace", model),
    inputs=hf_model,
    outputs=is_vision_indicator
).then(
    fn=lambda model: update_image_upload_visibility("HuggingFace", model),
    inputs=hf_model,
    outputs=image_upload_container
)

groq_model.change(
    fn=lambda model: update_context_display("Groq", model),
    inputs=groq_model,
    outputs=context_display
).then(
    fn=lambda model: update_model_info("Groq", model),
    inputs=groq_model,
    outputs=model_info_display
).then(
    fn=lambda model: update_vision_indicator("Groq", model),
    inputs=groq_model,
    outputs=is_vision_indicator
).then(
    fn=lambda model: update_image_upload_visibility("Groq", model),
    inputs=groq_model,
    outputs=image_upload_container
)

cohere_model.change(
    fn=lambda model: update_context_display("Cohere", model),
    inputs=cohere_model,
    outputs=context_display
).then(
    fn=lambda model: update_model_info("Cohere", model),
    inputs=cohere_model,
    outputs=model_info_display
).then(
    fn=lambda model: update_vision_indicator("Cohere", model),
    inputs=cohere_model,
    outputs=is_vision_indicator
).then(
    fn=lambda model: update_image_upload_visibility("Cohere", model),
    inputs=cohere_model,
    outputs=image_upload_container
)

together_model.change(
    fn=lambda model: update_context_display("Together", model),
    inputs=together_model,
    outputs=context_display
).then(
    fn=lambda model: update_model_info("Together", model),
    inputs=together_model,
    outputs=model_info_display
).then(
    fn=lambda model: update_vision_indicator("Together", model),
    inputs=together_model,
    outputs=is_vision_indicator
).then(
    fn=lambda model: update_image_upload_visibility("Together", model),
    inputs=together_model,
    outputs=image_upload_container
)

ovh_model.change(
    fn=lambda model: update_context_display("OVH", model),
    inputs=ovh_model,
    outputs=context_display
).then(
    fn=lambda model: update_model_info("OVH", model),
    inputs=ovh_model,
    outputs=model_info_display
).then(
    fn=lambda model: update_vision_indicator("OVH", model),
    inputs=ovh_model,
    outputs=is_vision_indicator
).then(
    fn=lambda model: update_image_upload_visibility("OVH", model),
    inputs=ovh_model,
    outputs=image_upload_container
)

cerebras_model.change(
    fn=lambda model: update_context_display("Cerebras", model),
    inputs=cerebras_model,
    outputs=context_display
).then(
    fn=lambda model: update_model_info("Cerebras", model),
    inputs=cerebras_model,
    outputs=model_info_display
).then(
    fn=lambda model: update_vision_indicator("Cerebras", model),
    inputs=cerebras_model,
    outputs=is_vision_indicator
).then(
    fn=lambda model: update_image_upload_visibility("Cerebras", model),
    inputs=cerebras_model,
    outputs=image_upload_container
)

googleai_model.change(
    fn=lambda model: update_context_display("GoogleAI", model),
    inputs=googleai_model,
    outputs=context_display
).then(
    fn=lambda model: update_model_info("GoogleAI", model),
    inputs=googleai_model,
    outputs=model_info_display
).then(
    fn=lambda model: update_vision_indicator("GoogleAI", model),
    inputs=googleai_model,
    outputs=is_vision_indicator
).then(
    fn=lambda model: update_image_upload_visibility("GoogleAI", model),
    inputs=googleai_model,
    outputs=image_upload_container
)

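# Sketch: the nine change-handler chains above are structurally identical and
# could be registered in a loop (requires `from functools import partial`;
# partial avoids the late-binding pitfall of closing over a loop variable with
# lambda). Illustrative refactor, not the original code.
# for name, dd in [("OpenRouter", openrouter_model), ("OpenAI", openai_model),
#                  ("HuggingFace", hf_model), ("Groq", groq_model),
#                  ("Cohere", cohere_model), ("Together", together_model),
#                  ("OVH", ovh_model), ("Cerebras", cerebras_model),
#                  ("GoogleAI", googleai_model)]:
#     dd.change(fn=partial(update_context_display, name), inputs=dd, outputs=context_display
#     ).then(fn=partial(update_model_info, name), inputs=dd, outputs=model_info_display
#     ).then(fn=partial(update_vision_indicator, name), inputs=dd, outputs=is_vision_indicator
#     ).then(fn=partial(update_image_upload_visibility, name), inputs=dd, outputs=image_upload_container)
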
# Set up submission event
def submit_message(message, history, provider, openrouter_model, openai_model, hf_model, groq_model, cohere_model, together_model, ovh_model, cerebras_model, googleai_model,
                   temperature, max_tokens, top_p, frequency_penalty, presence_penalty, repetition_penalty,
                   top_k, min_p, seed, top_a, stream_output, response_format,
                   images, documents, reasoning_effort, system_message, transforms, api_key_override):
    """Submit message to selected provider and model"""
    # Get the currently selected model
    model_choice = get_current_model(provider, openrouter_model, openai_model, hf_model, groq_model, cohere_model, together_model, ovh_model, cerebras_model, googleai_model)

    # Check if model is selected
    if not model_choice:
        new_history = history.copy()
        new_history.append([message, f"Error: No model selected for provider {provider}"])
        return new_history

    # Call the ask_ai function with the appropriate parameters
    return ask_ai(
...
    fn=submit_message,
    inputs=[
        message, chatbot, provider_choice,
        openrouter_model, openai_model, hf_model, groq_model, cohere_model, together_model, ovh_model, cerebras_model, googleai_model,
        temperature, max_tokens, top_p, frequency_penalty, presence_penalty, repetition_penalty,
        top_k, min_p, seed, top_a, stream_output, response_format,
        images, documents, reasoning_effort, system_message, transforms, api_key_override
...
    fn=submit_message,
    inputs=[
        message, chatbot, provider_choice,
        openrouter_model, openai_model, hf_model, groq_model, cohere_model, together_model, ovh_model, cerebras_model, googleai_model,
        temperature, max_tokens, top_p, frequency_penalty, presence_penalty, repetition_penalty,
        top_k, min_p, seed, top_a, stream_output, response_format,
        images, documents, reasoning_effort, system_message, transforms, api_key_override
...
logger.warning("WARNING: COHERE_API_KEY environment variable is not set")
|
2616 |
missing_keys.append("Cohere")
|
2617 |
|
2618 |
+
if not TOGETHER_API_KEY:
|
2619 |
+
logger.warning("WARNING: TOGETHER_API_KEY environment variable is not set")
|
2620 |
+
missing_keys.append("Together")
|
2621 |
+
|
2622 |
+
if not GOOGLEAI_API_KEY:
|
2623 |
+
logger.warning("WARNING: GOOGLEAI_API_KEY environment variable is not set")
|
2624 |
+
missing_keys.append("GoogleAI")
|
2625 |
|
2626 |
if missing_keys:
    print("Missing API keys for the following providers:")
...
    if "OpenRouter" in missing_keys:
        print("\nNote: OpenRouter offers free tier access to many models!")

    # Printed when neither OVH nor Cerebras is among the missing keys.
    if "OVH" not in missing_keys and "Cerebras" not in missing_keys:
        print("\nNote: OVH AI Endpoints (beta) and Cerebras offer free usage tiers!")

print("\nStarting Multi-Provider CrispChat application...")
demo = create_app()