import os import gradio as gr import requests import json import base64 import logging import io import time from typing import List, Dict, Any, Union, Tuple, Optional from dotenv import load_dotenv # Load environment variables from .env file load_dotenv() # Configure logging logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') logger = logging.getLogger(__name__) # Gracefully import libraries with fallbacks try: from PIL import Image HAS_PIL = True except ImportError: logger.warning("PIL not installed. Image processing will be limited.") HAS_PIL = False try: import PyPDF2 HAS_PYPDF2 = True except ImportError: logger.warning("PyPDF2 not installed. PDF processing will be limited.") HAS_PYPDF2 = False try: import markdown HAS_MARKDOWN = True except ImportError: logger.warning("Markdown not installed. Markdown processing will be limited.") HAS_MARKDOWN = False try: import openai HAS_OPENAI = True except ImportError: logger.warning("OpenAI package not installed. OpenAI models will be unavailable.") HAS_OPENAI = False try: from groq import Groq HAS_GROQ = True except ImportError: logger.warning("Groq client not installed. Groq API will be unavailable.") HAS_GROQ = False try: import cohere HAS_COHERE = True except ImportError: logger.warning("Cohere package not installed. Cohere models will be unavailable.") HAS_COHERE = False try: from huggingface_hub import InferenceClient HAS_HF = True except ImportError: logger.warning("HuggingFace hub not installed. HuggingFace models will be limited.") HAS_HF = False # API keys from environment OPENROUTER_API_KEY = os.environ.get("OPENROUTER_API_KEY", "") OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "") GROQ_API_KEY = os.environ.get("GROQ_API_KEY", "") COHERE_API_KEY = os.environ.get("COHERE_API_KEY", "") HF_API_KEY = os.environ.get("HF_API_KEY", "") TOGETHER_API_KEY = os.environ.get("TOGETHER_API_KEY", "") GOOGLEAI_API_KEY = os.environ.get("GOOGLEAI_API_KEY", "") # Print application startup message with timestamp current_time = time.strftime("%Y-%m-%d %H:%M:%S") print(f"===== Application Startup at {current_time} =====\n") # ========================================================== # MODEL DEFINITIONS # ========================================================== # OPENROUTER MODELS # These are the original models from the provided code OPENROUTER_MODELS = [ # 1M+ Context Models {"category": "1M+ Context", "models": [ ("Google: Gemini Pro 2.0 Experimental", "google/gemini-2.0-pro-exp-02-05:free", 2000000), ("Google: Gemini 2.0 Flash Thinking Experimental 01-21", "google/gemini-2.0-flash-thinking-exp:free", 1048576), ("Google: Gemini Flash 2.0 Experimental", "google/gemini-2.0-flash-exp:free", 1048576), ("Google: Gemini Pro 2.5 Experimental", "google/gemini-2.5-pro-exp-03-25:free", 1000000), ("Google: Gemini Flash 1.5 8B Experimental", "google/gemini-flash-1.5-8b-exp", 1000000), ]}, # 100K-1M Context Models {"category": "100K+ Context", "models": [ ("DeepSeek: DeepSeek R1 Zero", "deepseek/deepseek-r1-zero:free", 163840), ("DeepSeek: R1", "deepseek/deepseek-r1:free", 163840), ("DeepSeek: DeepSeek V3 Base", "deepseek/deepseek-v3-base:free", 131072), ("DeepSeek: DeepSeek V3 0324", "deepseek/deepseek-chat-v3-0324:free", 131072), ("Google: Gemma 3 4B", "google/gemma-3-4b-it:free", 131072), ("Google: Gemma 3 12B", "google/gemma-3-12b-it:free", 131072), ("Nous: DeepHermes 3 Llama 3 8B Preview", "nousresearch/deephermes-3-llama-3-8b-preview:free", 131072), ("Qwen: Qwen2.5 VL 72B Instruct", "qwen/qwen2.5-vl-72b-instruct:free", 131072), ("DeepSeek: DeepSeek V3", "deepseek/deepseek-chat:free", 131072), ("NVIDIA: Llama 3.1 Nemotron 70B Instruct", "nvidia/llama-3.1-nemotron-70b-instruct:free", 131072), ("Meta: Llama 3.2 1B Instruct", "meta-llama/llama-3.2-1b-instruct:free", 131072), ("Meta: Llama 3.2 11B Vision Instruct", "meta-llama/llama-3.2-11b-vision-instruct:free", 131072), ("Meta: Llama 3.1 8B Instruct", "meta-llama/llama-3.1-8b-instruct:free", 131072), ("Mistral: Mistral Nemo", "mistralai/mistral-nemo:free", 128000), ]}, # 64K-100K Context Models {"category": "64K-100K Context", "models": [ ("Mistral: Mistral Small 3.1 24B", "mistralai/mistral-small-3.1-24b-instruct:free", 96000), ("Google: Gemma 3 27B", "google/gemma-3-27b-it:free", 96000), ("Qwen: Qwen2.5 VL 3B Instruct", "qwen/qwen2.5-vl-3b-instruct:free", 64000), ("DeepSeek: R1 Distill Qwen 14B", "deepseek/deepseek-r1-distill-qwen-14b:free", 64000), ("Qwen: Qwen2.5-VL 7B Instruct", "qwen/qwen-2.5-vl-7b-instruct:free", 64000), ]}, # 32K-64K Context Models {"category": "32K-64K Context", "models": [ ("Google: LearnLM 1.5 Pro Experimental", "google/learnlm-1.5-pro-experimental:free", 40960), ("Qwen: QwQ 32B", "qwen/qwq-32b:free", 40000), ("Google: Gemini 2.0 Flash Thinking Experimental", "google/gemini-2.0-flash-thinking-exp-1219:free", 40000), ("Bytedance: UI-TARS 72B", "bytedance-research/ui-tars-72b:free", 32768), ("Qwerky 72b", "featherless/qwerky-72b:free", 32768), ("OlympicCoder 7B", "open-r1/olympiccoder-7b:free", 32768), ("OlympicCoder 32B", "open-r1/olympiccoder-32b:free", 32768), ("Google: Gemma 3 1B", "google/gemma-3-1b-it:free", 32768), ("Reka: Flash 3", "rekaai/reka-flash-3:free", 32768), ("Dolphin3.0 R1 Mistral 24B", "cognitivecomputations/dolphin3.0-r1-mistral-24b:free", 32768), ("Dolphin3.0 Mistral 24B", "cognitivecomputations/dolphin3.0-mistral-24b:free", 32768), ("Mistral: Mistral Small 3", "mistralai/mistral-small-24b-instruct-2501:free", 32768), ("Qwen2.5 Coder 32B Instruct", "qwen/qwen-2.5-coder-32b-instruct:free", 32768), ("Qwen2.5 72B Instruct", "qwen/qwen-2.5-72b-instruct:free", 32768), ]}, # 8K-32K Context Models {"category": "8K-32K Context", "models": [ ("Meta: Llama 3.2 3B Instruct", "meta-llama/llama-3.2-3b-instruct:free", 20000), ("Qwen: QwQ 32B Preview", "qwen/qwq-32b-preview:free", 16384), ("DeepSeek: R1 Distill Qwen 32B", "deepseek/deepseek-r1-distill-qwen-32b:free", 16000), ("Qwen: Qwen2.5 VL 32B Instruct", "qwen/qwen2.5-vl-32b-instruct:free", 8192), ("Moonshot AI: Moonlight 16B A3B Instruct", "moonshotai/moonlight-16b-a3b-instruct:free", 8192), ("DeepSeek: R1 Distill Llama 70B", "deepseek/deepseek-r1-distill-llama-70b:free", 8192), ("Qwen 2 7B Instruct", "qwen/qwen-2-7b-instruct:free", 8192), ("Google: Gemma 2 9B", "google/gemma-2-9b-it:free", 8192), ("Mistral: Mistral 7B Instruct", "mistralai/mistral-7b-instruct:free", 8192), ("Microsoft: Phi-3 Mini 128K Instruct", "microsoft/phi-3-mini-128k-instruct:free", 8192), ("Microsoft: Phi-3 Medium 128K Instruct", "microsoft/phi-3-medium-128k-instruct:free", 8192), ("Meta: Llama 3 8B Instruct", "meta-llama/llama-3-8b-instruct:free", 8192), ("OpenChat 3.5 7B", "openchat/openchat-7b:free", 8192), ("Meta: Llama 3.3 70B Instruct", "meta-llama/llama-3.3-70b-instruct:free", 8000), ]}, # <8K Context Models {"category": "4K Context", "models": [ ("AllenAI: Molmo 7B D", "allenai/molmo-7b-d:free", 4096), ("Rogue Rose 103B v0.2", "sophosympatheia/rogue-rose-103b-v0.2:free", 4096), ("Toppy M 7B", "undi95/toppy-m-7b:free", 4096), ("Hugging Face: Zephyr 7B", "huggingfaceh4/zephyr-7b-beta:free", 4096), ("MythoMax 13B", "gryphe/mythomax-l2-13b:free", 4096), ]}, # Vision-capable Models {"category": "Vision Models", "models": [ ("Google: Gemini Pro 2.0 Experimental", "google/gemini-2.0-pro-exp-02-05:free", 2000000), ("Google: Gemini 2.0 Flash Thinking Experimental 01-21", "google/gemini-2.0-flash-thinking-exp:free", 1048576), ("Google: Gemini Flash 2.0 Experimental", "google/gemini-2.0-flash-exp:free", 1048576), ("Google: Gemini Pro 2.5 Experimental", "google/gemini-2.5-pro-exp-03-25:free", 1000000), ("Google: Gemini Flash 1.5 8B Experimental", "google/gemini-flash-1.5-8b-exp", 1000000), ("Google: Gemma 3 4B", "google/gemma-3-4b-it:free", 131072), ("Google: Gemma 3 12B", "google/gemma-3-12b-it:free", 131072), ("Qwen: Qwen2.5 VL 72B Instruct", "qwen/qwen2.5-vl-72b-instruct:free", 131072), ("Meta: Llama 3.2 11B Vision Instruct", "meta-llama/llama-3.2-11b-vision-instruct:free", 131072), ("Mistral: Mistral Small 3.1 24B", "mistralai/mistral-small-3.1-24b-instruct:free", 96000), ("Google: Gemma 3 27B", "google/gemma-3-27b-it:free", 96000), ("Qwen: Qwen2.5 VL 3B Instruct", "qwen/qwen2.5-vl-3b-instruct:free", 64000), ("Qwen: Qwen2.5-VL 7B Instruct", "qwen/qwen-2.5-vl-7b-instruct:free", 64000), ("Google: LearnLM 1.5 Pro Experimental", "google/learnlm-1.5-pro-experimental:free", 40960), ("Google: Gemini 2.0 Flash Thinking Experimental", "google/gemini-2.0-flash-thinking-exp-1219:free", 40000), ("Bytedance: UI-TARS 72B", "bytedance-research/ui-tars-72b:free", 32768), ("Google: Gemma 3 1B", "google/gemma-3-1b-it:free", 32768), ("Qwen: Qwen2.5 VL 32B Instruct", "qwen/qwen2.5-vl-32b-instruct:free", 8192), ("AllenAI: Molmo 7B D", "allenai/molmo-7b-d:free", 4096), ]}, ] # Flatten OpenRouter model list for easier access OPENROUTER_ALL_MODELS = [] for category in OPENROUTER_MODELS: for model in category["models"]: if model not in OPENROUTER_ALL_MODELS: # Avoid duplicates OPENROUTER_ALL_MODELS.append(model) # VISION MODELS - For tracking which models support images VISION_MODELS = { "OpenRouter": [model[0] for model in OPENROUTER_MODELS[-1]["models"]], # Last category is Vision Models "OpenAI": [ "gpt-4-vision-preview", "gpt-4o", "gpt-4o-mini", "gpt-4-turbo", "gpt-4-turbo-preview", "gpt-4-0125-preview", "gpt-4-1106-preview", "o1-preview", "o1-mini" ], "HuggingFace": [ "Qwen/Qwen2.5-VL-7B-Instruct", "Qwen/qwen2.5-vl-3b-instruct", "Qwen/qwen2.5-vl-32b-instruct", "Qwen/qwen2.5-vl-72b-instruct" ], "Groq": ["llama-3.2-11b-vision", "llama-3.2-90b-vision"], "Together": ["Llama-3.2-11B-Vision-Instruct", "Llama-3.2-90B-Vision-Instruct"], "OVH": ["llava-next-mistral-7b", "qwen2.5-vl-72b-instruct"], "Cerebras": [], "GoogleAI": ["gemini-1.5-pro", "gemini-1.0-pro", "gemini-1.5-flash", "gemini-2.0-pro", "gemini-2.5-pro"] } # OPENAI MODELS OPENAI_MODELS = { "gpt-3.5-turbo": 16385, "gpt-3.5-turbo-0125": 16385, "gpt-3.5-turbo-1106": 16385, "gpt-3.5-turbo-instruct": 4096, "gpt-4": 8192, "gpt-4-0314": 8192, "gpt-4-0613": 8192, "gpt-4-turbo": 128000, "gpt-4-turbo-2024-04-09": 128000, "gpt-4-turbo-preview": 128000, "gpt-4-0125-preview": 128000, "gpt-4-1106-preview": 128000, "gpt-4o": 128000, "gpt-4o-2024-11-20": 128000, "gpt-4o-2024-08-06": 128000, "gpt-4o-2024-05-13": 128000, "gpt-4o-mini": 128000, "gpt-4o-mini-2024-07-18": 128000, "o1-preview": 128000, "o1-preview-2024-09-12": 128000, "o1-mini": 128000, "o1-mini-2024-09-12": 128000, } # HUGGINGFACE MODELS HUGGINGFACE_MODELS = { "microsoft/phi-3-mini-4k-instruct": 4096, "microsoft/Phi-3-mini-128k-instruct": 131072, "HuggingFaceH4/zephyr-7b-beta": 8192, "deepseek-ai/DeepSeek-Coder-V2-Instruct": 8192, "mistralai/Mistral-7B-Instruct-v0.3": 32768, "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": 32768, "microsoft/Phi-3.5-mini-instruct": 4096, "google/gemma-2-2b-it": 2048, "openai-community/gpt2": 1024, "microsoft/phi-2": 2048, "TinyLlama/TinyLlama-1.1B-Chat-v1.0": 2048, "VAGOsolutions/Llama-3-SauerkrautLM-8b-Instruct": 2048, "VAGOsolutions/Llama-3.1-SauerkrautLM-8b-Instruct": 4096, "VAGOsolutions/SauerkrautLM-Nemo-12b-Instruct": 4096, "openGPT-X/Teuken-7B-instruct-research-v0.4": 4096, "Qwen/Qwen2.5-7B-Instruct": 131072, "tiiuae/falcon-7b-instruct": 8192, "Qwen/QwQ-32B-preview": 32768, "Qwen/Qwen2.5-VL-7B-Instruct": 64000, "Qwen/qwen2.5-vl-3b-instruct": 64000, "Qwen/qwen2.5-vl-32b-instruct": 8192, "Qwen/qwen2.5-vl-72b-instruct": 131072, } # GROQ MODELS - We'll populate this dynamically DEFAULT_GROQ_MODELS = { "deepseek-r1-distill-llama-70b": 8192, "deepseek-r1-distill-qwen-32b": 8192, "gemma2-9b-it": 8192, "llama-3.1-8b-instant": 131072, "llama-3.2-1b-preview": 131072, "llama-3.2-3b-preview": 131072, "llama-3.2-11b-vision-preview": 131072, "llama-3.2-90b-vision-preview": 131072, "llama-3.3-70b-specdec": 131072, "llama-3.3-70b-versatile": 131072, "llama-guard-3-8b": 8192, "llama3-8b-8192": 8192, "llama3-70b-8192": 8192, "mistral-saba-24b": 32768, "qwen-2.5-32b": 32768, "qwen-2.5-coder-32b": 32768, "qwen-qwq-32b": 32768, "playai-tts": 4096, # Including TTS models but setting reasonable context limits "playai-tts-arabic": 4096, "distil-whisper-large-v3-en": 4096, "whisper-large-v3": 4096, "whisper-large-v3-turbo": 4096 } # COHERE MODELS COHERE_MODELS = { "command-r-plus-08-2024": 131072, "command-r-plus-04-2024": 131072, "command-r-plus": 131072, "command-r-08-2024": 131072, "command-r-03-2024": 131072, "command-r": 131072, "command": 4096, "command-nightly": 131072, "command-light": 4096, "command-light-nightly": 4096, "c4ai-aya-expanse-8b": 8192, "c4ai-aya-expanse-32b": 131072, } # TOGETHER MODELS TOGETHER_MODELS = { "meta-llama/Llama-3.1-70B-Instruct": 131072, "meta-llama/Llama-3.1-8B-Instruct": 131072, "meta-llama/Llama-3.3-70B-Instruct": 131072, "deepseek-ai/deepseek-r1-distill-llama-70b": 8192, "meta-llama/Llama-3.2-11B-Vision-Instruct": 131072, "meta-llama/Llama-3.2-90B-Vision-Instruct": 131072, } # OVH MODELS - OVH AI Endpoints (free beta) OVH_MODELS = { "ovh/codestral-mamba-7b-v0.1": 131072, "ovh/deepseek-r1-distill-llama-70b": 8192, "ovh/llama-3.1-70b-instruct": 131072, "ovh/llama-3.1-8b-instruct": 131072, "ovh/llama-3.3-70b-instruct": 131072, "ovh/llava-next-mistral-7b": 8192, "ovh/mistral-7b-instruct-v0.3": 32768, "ovh/mistral-nemo-2407": 131072, "ovh/mixtral-8x7b-instruct": 32768, "ovh/qwen2.5-coder-32b-instruct": 32768, "ovh/qwen2.5-vl-72b-instruct": 131072, } # CEREBRAS MODELS CEREBRAS_MODELS = { "cerebras/llama-3.1-8b": 8192, "cerebras/llama-3.3-70b": 8192, } # GOOGLE AI MODELS GOOGLEAI_MODELS = { "gemini-1.0-pro": 32768, "gemini-1.5-flash": 1000000, "gemini-1.5-pro": 1000000, "gemini-2.0-pro": 2000000, "gemini-2.5-pro": 2000000, } # Add all models with "vl", "vision", "visual" in their name to HF vision models for model_name in list(HUGGINGFACE_MODELS.keys()): if any(x in model_name.lower() for x in ["vl", "vision", "visual", "llava"]): if model_name not in VISION_MODELS["HuggingFace"]: VISION_MODELS["HuggingFace"].append(model_name) # ========================================================== # HELPER FUNCTIONS # ========================================================== def fetch_groq_models(): """Fetch available Groq models with proper error handling""" try: if not HAS_GROQ or not GROQ_API_KEY: logger.warning("Groq client not available or no API key. Using default model list.") return DEFAULT_GROQ_MODELS client = Groq(api_key=GROQ_API_KEY) models = client.models.list() # Create dictionary of model_id -> context size model_dict = {} for model in models.data: model_id = model.id # Map known context sizes or use a default if "llama-3" in model_id and "70b" in model_id: context_size = 131072 elif "llama-3" in model_id and "8b" in model_id: context_size = 131072 elif "mixtral" in model_id: context_size = 32768 elif "gemma" in model_id: context_size = 8192 elif "vision" in model_id: context_size = 131072 else: context_size = 8192 # Default assumption model_dict[model_id] = context_size # Ensure we have models by combining with defaults if not model_dict: return DEFAULT_GROQ_MODELS return {**DEFAULT_GROQ_MODELS, **model_dict} except Exception as e: logger.error(f"Error fetching Groq models: {e}") return DEFAULT_GROQ_MODELS # Initialize Groq models GROQ_MODELS = fetch_groq_models() def encode_image_to_base64(image_path): """Encode an image file to base64 string""" try: if isinstance(image_path, str): # File path as string with open(image_path, "rb") as image_file: encoded_string = base64.b64encode(image_file.read()).decode('utf-8') file_extension = image_path.split('.')[-1].lower() mime_type = f"image/{file_extension}" if file_extension in ["jpg", "jpeg"]: mime_type = "image/jpeg" elif file_extension == "png": mime_type = "image/png" elif file_extension == "webp": mime_type = "image/webp" return f"data:{mime_type};base64,{encoded_string}" elif hasattr(image_path, 'name'): # Handle Gradio file objects directly with open(image_path.name, "rb") as image_file: encoded_string = base64.b64encode(image_file.read()).decode('utf-8') file_extension = image_path.name.split('.')[-1].lower() mime_type = f"image/{file_extension}" if file_extension in ["jpg", "jpeg"]: mime_type = "image/jpeg" elif file_extension == "png": mime_type = "image/png" elif file_extension == "webp": mime_type = "image/webp" return f"data:{mime_type};base64,{encoded_string}" else: # Handle file object or other types logger.error(f"Unsupported image type: {type(image_path)}") return None except Exception as e: logger.error(f"Error encoding image: {str(e)}") return None def extract_text_from_file(file_path): """Extract text from various file types""" try: file_extension = file_path.split('.')[-1].lower() if file_extension == 'pdf': if HAS_PYPDF2: text = "" with open(file_path, 'rb') as file: pdf_reader = PyPDF2.PdfReader(file) for page_num in range(len(pdf_reader.pages)): page = pdf_reader.pages[page_num] text += page.extract_text() + "\n\n" return text else: return "PDF processing is not available (PyPDF2 not installed)" elif file_extension == 'md': with open(file_path, 'r', encoding='utf-8') as file: return file.read() elif file_extension == 'txt': with open(file_path, 'r', encoding='utf-8') as file: return file.read() else: return f"Unsupported file type: {file_extension}" except Exception as e: logger.error(f"Error extracting text from file: {str(e)}") return f"Error processing file: {str(e)}" def prepare_message_with_media(text, images=None, documents=None): """Prepare a message with text, images, and document content""" # If no media, return text only if not images and not documents: return text # Start with text content if documents and len(documents) > 0: # If there are documents, append their content to the text document_texts = [] for doc in documents: if doc is None: continue # Make sure to handle file objects properly doc_path = doc.name if hasattr(doc, 'name') else doc doc_text = extract_text_from_file(doc_path) if doc_text: document_texts.append(doc_text) # Add document content to text if document_texts: if not text: text = "Please analyze these documents:" else: text = f"{text}\n\nDocument content:\n\n" text += "\n\n".join(document_texts) # If no images, return text only if not images: return text # If we have images, create a multimodal content array content = [{"type": "text", "text": text}] # Add images if any if images: # Check if images is a list of image paths or file objects if isinstance(images, list): for img in images: if img is None: continue encoded_image = encode_image_to_base64(img) if encoded_image: content.append({ "type": "image_url", "image_url": {"url": encoded_image} }) else: # For single image or Gallery component logger.warning(f"Images is not a list: {type(images)}") # Try to handle as single image encoded_image = encode_image_to_base64(images) if encoded_image: content.append({ "type": "image_url", "image_url": {"url": encoded_image} }) return content def format_to_message_dict(history): """Convert history to proper message format""" messages = [] for item in history: if isinstance(item, dict) and "role" in item and "content" in item: # Already in the correct format messages.append(item) elif isinstance(item, list) and len(item) == 2: # Convert from old format [user_msg, ai_msg] human, ai = item if human: messages.append({"role": "user", "content": human}) if ai: messages.append({"role": "assistant", "content": ai}) return messages def process_uploaded_images(files): """Process uploaded image files""" file_paths = [] for file in files: if hasattr(file, 'name'): file_paths.append(file.name) return file_paths def filter_models(provider, search_term): """Filter models based on search term and provider""" if provider == "OpenRouter": all_models = [model[0] for model in OPENROUTER_ALL_MODELS] elif provider == "OpenAI": all_models = list(OPENAI_MODELS.keys()) elif provider == "HuggingFace": all_models = list(HUGGINGFACE_MODELS.keys()) elif provider == "Groq": all_models = list(GROQ_MODELS.keys()) elif provider == "Cohere": all_models = list(COHERE_MODELS.keys()) elif provider == "Together": all_models = list(TOGETHER_MODELS.keys()) elif provider == "OVH": all_models = list(OVH_MODELS.keys()) elif provider == "Cerebras": all_models = list(CEREBRAS_MODELS.keys()) elif provider == "GoogleAI": all_models = list(GOOGLEAI_MODELS.keys()) else: return [], None if not search_term: return all_models, all_models[0] if all_models else None filtered_models = [model for model in all_models if search_term.lower() in model.lower()] if filtered_models: return filtered_models, filtered_models[0] else: return all_models, all_models[0] if all_models else None def get_model_info(provider, model_choice): """Get model ID and context size based on provider and model name""" if provider == "OpenRouter": for name, model_id, ctx_size in OPENROUTER_ALL_MODELS: if name == model_choice: return model_id, ctx_size elif provider == "OpenAI": if model_choice in OPENAI_MODELS: return model_choice, OPENAI_MODELS[model_choice] elif provider == "HuggingFace": if model_choice in HUGGINGFACE_MODELS: return model_choice, HUGGINGFACE_MODELS[model_choice] elif provider == "Groq": if model_choice in GROQ_MODELS: return model_choice, GROQ_MODELS[model_choice] elif provider == "Cohere": if model_choice in COHERE_MODELS: return model_choice, COHERE_MODELS[model_choice] elif provider == "Together": if model_choice in TOGETHER_MODELS: return model_choice, TOGETHER_MODELS[model_choice] elif provider == "OVH": if model_choice in OVH_MODELS: return model_choice, OVH_MODELS[model_choice] elif provider == "Cerebras": if model_choice in CEREBRAS_MODELS: return model_choice, CEREBRAS_MODELS[model_choice] elif provider == "GoogleAI": if model_choice in GOOGLEAI_MODELS: return model_choice, GOOGLEAI_MODELS[model_choice] return None, 0 def update_context_display(provider, model_name): """Update context size display for the selected model""" _, ctx_size = get_model_info(provider, model_name) return f"{ctx_size:,}" if ctx_size else "Unknown" def is_vision_model(provider, model_name): """Check if a model supports vision/images""" # Safety check for None model name if model_name is None: return False if provider in VISION_MODELS: if model_name in VISION_MODELS[provider]: return True # Also check for common vision indicators in model names try: if any(x in model_name.lower() for x in ["vl", "vision", "visual", "llava", "gemini"]): return True except AttributeError: # In case model_name is not a string or has no lower method return False return False def update_model_info(provider, model_name): """Generate HTML info display for the selected model""" model_id, ctx_size = get_model_info(provider, model_name) if not model_id: return "
Model information not available
" # Check if this is a vision model is_vision = is_vision_model(provider, model_name) vision_badge = 'Vision' if is_vision else '' # For OpenRouter, show the model ID model_id_html = f"Model ID: {model_id}
" if provider == "OpenRouter" else "" # For others, the ID is the same as the name if provider != "OpenRouter": model_id_html = "" return f"""Context Size: {ctx_size:,} tokens
Provider: {provider}
{f'Features: Supports image understanding
' if is_vision else ''}Model information not available
" def update_vision_indicator(provider, model_choice): """Update the vision capability indicator""" # Safety check for None model if model_choice is None: return False return is_vision_model(provider, model_choice) def update_image_upload_visibility(provider, model_choice): """Show/hide image upload based on model vision capabilities""" # Safety check for None model if model_choice is None: return gr.update(visible=False) is_vision = is_vision_model(provider, model_choice) return gr.update(visible=is_vision) # Search model function def search_openrouter_models(search_term): """Filter OpenRouter models based on search term""" all_models = [model[0] for model in OPENROUTER_ALL_MODELS] if not search_term: return gr.update(choices=all_models, value=all_models[0] if all_models else None) filtered_models = [model for model in all_models if search_term.lower() in model.lower()] if filtered_models: return gr.update(choices=filtered_models, value=filtered_models[0]) else: return gr.update(choices=all_models, value=all_models[0] if all_models else None) def search_openai_models(search_term): """Filter OpenAI models based on search term""" all_models = list(OPENAI_MODELS.keys()) if not search_term: return gr.update(choices=all_models, value="gpt-3.5-turbo" if "gpt-3.5-turbo" in all_models else all_models[0] if all_models else None) filtered_models = [model for model in all_models if search_term.lower() in model.lower()] if filtered_models: return gr.update(choices=filtered_models, value=filtered_models[0]) else: return gr.update(choices=all_models, value="gpt-3.5-turbo" if "gpt-3.5-turbo" in all_models else all_models[0] if all_models else None) def search_hf_models(search_term): """Filter HuggingFace models based on search term""" all_models = list(HUGGINGFACE_MODELS.keys()) if not search_term: default_model = "mistralai/Mistral-7B-Instruct-v0.3" if "mistralai/Mistral-7B-Instruct-v0.3" in all_models else all_models[0] if all_models else None return gr.update(choices=all_models, value=default_model) filtered_models = [model for model in all_models if search_term.lower() in model.lower()] if filtered_models: return gr.update(choices=filtered_models, value=filtered_models[0]) else: default_model = "mistralai/Mistral-7B-Instruct-v0.3" if "mistralai/Mistral-7B-Instruct-v0.3" in all_models else all_models[0] if all_models else None return gr.update(choices=all_models, value=default_model) def search_groq_models(search_term): """Filter Groq models based on search term""" all_models = list(GROQ_MODELS.keys()) if not search_term: default_model = "llama-3.1-8b-instant" if "llama-3.1-8b-instant" in all_models else all_models[0] if all_models else None return gr.update(choices=all_models, value=default_model) filtered_models = [model for model in all_models if search_term.lower() in model.lower()] if filtered_models: return gr.update(choices=filtered_models, value=filtered_models[0]) else: default_model = "llama-3.1-8b-instant" if "llama-3.1-8b-instant" in all_models else all_models[0] if all_models else None return gr.update(choices=all_models, value=default_model) def search_cohere_models(search_term): """Filter Cohere models based on search term""" all_models = list(COHERE_MODELS.keys()) if not search_term: default_model = "command-r-plus" if "command-r-plus" in all_models else all_models[0] if all_models else None return gr.update(choices=all_models, value=default_model) filtered_models = [model for model in all_models if search_term.lower() in model.lower()] if filtered_models: return gr.update(choices=filtered_models, value=filtered_models[0]) else: default_model = "command-r-plus" if "command-r-plus" in all_models else all_models[0] if all_models else None return gr.update(choices=all_models, value=default_model) def search_together_models(search_term): """Filter Together models based on search term""" all_models = list(TOGETHER_MODELS.keys()) if not search_term: default_model = "meta-llama/Llama-3.1-8B-Instruct" if "meta-llama/Llama-3.1-8B-Instruct" in all_models else all_models[0] if all_models else None return gr.update(choices=all_models, value=default_model) filtered_models = [model for model in all_models if search_term.lower() in model.lower()] if filtered_models: return gr.update(choices=filtered_models, value=filtered_models[0]) else: default_model = "meta-llama/Llama-3.1-8B-Instruct" if "meta-llama/Llama-3.1-8B-Instruct" in all_models else all_models[0] if all_models else None return gr.update(choices=all_models, value=default_model) def search_ovh_models(search_term): """Filter OVH models based on search term""" all_models = list(OVH_MODELS.keys()) if not search_term: default_model = "ovh/llama-3.1-8b-instruct" if "ovh/llama-3.1-8b-instruct" in all_models else all_models[0] if all_models else None return gr.update(choices=all_models, value=default_model) filtered_models = [model for model in all_models if search_term.lower() in model.lower()] if filtered_models: return gr.update(choices=filtered_models, value=filtered_models[0]) else: default_model = "ovh/llama-3.1-8b-instruct" if "ovh/llama-3.1-8b-instruct" in all_models else all_models[0] if all_models else None return gr.update(choices=all_models, value=default_model) def search_cerebras_models(search_term): """Filter Cerebras models based on search term""" all_models = list(CEREBRAS_MODELS.keys()) if not search_term: default_model = "cerebras/llama-3.1-8b" if "cerebras/llama-3.1-8b" in all_models else all_models[0] if all_models else None return gr.update(choices=all_models, value=default_model) filtered_models = [model for model in all_models if search_term.lower() in model.lower()] if filtered_models: return gr.update(choices=filtered_models, value=filtered_models[0]) else: default_model = "cerebras/llama-3.1-8b" if "cerebras/llama-3.1-8b" in all_models else all_models[0] if all_models else None return gr.update(choices=all_models, value=default_model) def search_googleai_models(search_term): """Filter GoogleAI models based on search term""" all_models = list(GOOGLEAI_MODELS.keys()) if not search_term: default_model = "gemini-1.5-pro" if "gemini-1.5-pro" in all_models else all_models[0] if all_models else None return gr.update(choices=all_models, value=default_model) filtered_models = [model for model in all_models if search_term.lower() in model.lower()] if filtered_models: return gr.update(choices=filtered_models, value=filtered_models[0]) else: default_model = "gemini-1.5-pro" if "gemini-1.5-pro" in all_models else all_models[0] if all_models else None return gr.update(choices=all_models, value=default_model) def get_current_model(provider, openrouter_model, openai_model, hf_model, groq_model, cohere_model, together_model, ovh_model, cerebras_model, googleai_model): """Get the currently selected model based on provider""" if provider == "OpenRouter": return openrouter_model if openrouter_model else OPENROUTER_ALL_MODELS[0][0] if OPENROUTER_ALL_MODELS else None elif provider == "OpenAI": return openai_model if openai_model else "gpt-3.5-turbo" if "gpt-3.5-turbo" in OPENAI_MODELS else None elif provider == "HuggingFace": return hf_model if hf_model else "mistralai/Mistral-7B-Instruct-v0.3" if "mistralai/Mistral-7B-Instruct-v0.3" in HUGGINGFACE_MODELS else None elif provider == "Groq": return groq_model if groq_model else "llama-3.1-8b-instant" if "llama-3.1-8b-instant" in GROQ_MODELS else None elif provider == "Cohere": return cohere_model if cohere_model else "command-r-plus" if "command-r-plus" in COHERE_MODELS else None elif provider == "Together": return together_model if together_model else "meta-llama/Llama-3.1-8B-Instruct" if "meta-llama/Llama-3.1-8B-Instruct" in TOGETHER_MODELS else None elif provider == "OVH": return ovh_model if ovh_model else "ovh/llama-3.1-8b-instruct" if "ovh/llama-3.1-8b-instruct" in OVH_MODELS else None elif provider == "Cerebras": return cerebras_model if cerebras_model else "cerebras/llama-3.1-8b" if "cerebras/llama-3.1-8b" in CEREBRAS_MODELS else None elif provider == "GoogleAI": return googleai_model if googleai_model else "gemini-1.5-pro" if "gemini-1.5-pro" in GOOGLEAI_MODELS else None return None # Process uploaded images image_upload_btn.upload( fn=lambda files: files, inputs=image_upload_btn, outputs=images ) # Set up provider selection event provider_choice.change( fn=toggle_model_dropdowns, inputs=provider_choice, outputs=[ openrouter_model, openai_model, hf_model, groq_model, cohere_model, together_model, ovh_model, cerebras_model, googleai_model ] ).then( fn=update_context_for_provider, inputs=[provider_choice, openrouter_model, openai_model, hf_model, groq_model, cohere_model, together_model, ovh_model, cerebras_model, googleai_model], outputs=context_display ).then( fn=update_model_info_for_provider, inputs=[provider_choice, openrouter_model, openai_model, hf_model, groq_model, cohere_model, together_model, ovh_model, cerebras_model, googleai_model], outputs=model_info_display ).then( fn=lambda provider, model: update_vision_indicator( provider, get_current_model(provider, model, None, None, None, None, None, None, None, None) ), inputs=[provider_choice, openrouter_model], outputs=is_vision_indicator ).then( fn=lambda provider, model: update_image_upload_visibility( provider, get_current_model(provider, model, None, None, None, None, None, None, None, None) ), inputs=[provider_choice, openrouter_model], outputs=image_upload_container ) # Set up model search event - return model dropdown updates model_search.change( fn=lambda provider, search: [ search_openrouter_models(search) if provider == "OpenRouter" else gr.update(), search_openai_models(search) if provider == "OpenAI" else gr.update(), search_hf_models(search) if provider == "HuggingFace" else gr.update(), search_groq_models(search) if provider == "Groq" else gr.update(), search_cohere_models(search) if provider == "Cohere" else gr.update(), search_together_models(search) if provider == "Together" else gr.update(), search_ovh_models(search) if provider == "OVH" else gr.update(), search_cerebras_models(search) if provider == "Cerebras" else gr.update(), search_googleai_models(search) if provider == "GoogleAI" else gr.update() ], inputs=[provider_choice, model_search], outputs=[ openrouter_model, openai_model, hf_model, groq_model, cohere_model, together_model, ovh_model, cerebras_model, googleai_model ] ) # Set up model change events to update context display and model info openrouter_model.change( fn=lambda model: update_context_display("OpenRouter", model), inputs=openrouter_model, outputs=context_display ).then( fn=lambda model: update_model_info("OpenRouter", model), inputs=openrouter_model, outputs=model_info_display ).then( fn=lambda model: update_vision_indicator("OpenRouter", model), inputs=openrouter_model, outputs=is_vision_indicator ).then( fn=lambda model: update_image_upload_visibility("OpenRouter", model), inputs=openrouter_model, outputs=image_upload_container ) openai_model.change( fn=lambda model: update_context_display("OpenAI", model), inputs=openai_model, outputs=context_display ).then( fn=lambda model: update_model_info("OpenAI", model), inputs=openai_model, outputs=model_info_display ).then( fn=lambda model: update_vision_indicator("OpenAI", model), inputs=openai_model, outputs=is_vision_indicator ).then( fn=lambda model: update_image_upload_visibility("OpenAI", model), inputs=openai_model, outputs=image_upload_container ) hf_model.change( fn=lambda model: update_context_display("HuggingFace", model), inputs=hf_model, outputs=context_display ).then( fn=lambda model: update_model_info("HuggingFace", model), inputs=hf_model, outputs=model_info_display ).then( fn=lambda model: update_vision_indicator("HuggingFace", model), inputs=hf_model, outputs=is_vision_indicator ).then( fn=lambda model: update_image_upload_visibility("HuggingFace", model), inputs=hf_model, outputs=image_upload_container ) groq_model.change( fn=lambda model: update_context_display("Groq", model), inputs=groq_model, outputs=context_display ).then( fn=lambda model: update_model_info("Groq", model), inputs=groq_model, outputs=model_info_display ).then( fn=lambda model: update_vision_indicator("Groq", model), inputs=groq_model, outputs=is_vision_indicator ).then( fn=lambda model: update_image_upload_visibility("Groq", model), inputs=groq_model, outputs=image_upload_container ) cohere_model.change( fn=lambda model: update_context_display("Cohere", model), inputs=cohere_model, outputs=context_display ).then( fn=lambda model: update_model_info("Cohere", model), inputs=cohere_model, outputs=model_info_display ).then( fn=lambda model: update_vision_indicator("Cohere", model), inputs=cohere_model, outputs=is_vision_indicator ).then( fn=lambda model: update_image_upload_visibility("Cohere", model), inputs=cohere_model, outputs=image_upload_container ) together_model.change( fn=lambda model: update_context_display("Together", model), inputs=together_model, outputs=context_display ).then( fn=lambda model: update_model_info("Together", model), inputs=together_model, outputs=model_info_display ).then( fn=lambda model: update_vision_indicator("Together", model), inputs=together_model, outputs=is_vision_indicator ).then( fn=lambda model: update_image_upload_visibility("Together", model), inputs=together_model, outputs=image_upload_container ) ovh_model.change( fn=lambda model: update_context_display("OVH", model), inputs=ovh_model, outputs=context_display ).then( fn=lambda model: update_model_info("OVH", model), inputs=ovh_model, outputs=model_info_display ).then( fn=lambda model: update_vision_indicator("OVH", model), inputs=ovh_model, outputs=is_vision_indicator ).then( fn=lambda model: update_image_upload_visibility("OVH", model), inputs=ovh_model, outputs=image_upload_container ) cerebras_model.change( fn=lambda model: update_context_display("Cerebras", model), inputs=cerebras_model, outputs=context_display ).then( fn=lambda model: update_model_info("Cerebras", model), inputs=cerebras_model, outputs=model_info_display ).then( fn=lambda model: update_vision_indicator("Cerebras", model), inputs=cerebras_model, outputs=is_vision_indicator ).then( fn=lambda model: update_image_upload_visibility("Cerebras", model), inputs=cerebras_model, outputs=image_upload_container ) googleai_model.change( fn=lambda model: update_context_display("GoogleAI", model), inputs=googleai_model, outputs=context_display ).then( fn=lambda model: update_model_info("GoogleAI", model), inputs=googleai_model, outputs=model_info_display ).then( fn=lambda model: update_vision_indicator("GoogleAI", model), inputs=googleai_model, outputs=is_vision_indicator ).then( fn=lambda model: update_image_upload_visibility("GoogleAI", model), inputs=googleai_model, outputs=image_upload_container ) def handle_search(provider, search_term): """Handle search based on provider""" if provider == "OpenRouter": return search_openrouter_models(search_term) elif provider == "OpenAI": return search_openai_models(search_term) elif provider == "HuggingFace": return search_hf_models(search_term) elif provider == "Groq": return search_groq_models(search_term) elif provider == "Cohere": return search_cohere_models(search_term) elif provider == "Together": return search_together_models(search_term) elif provider == "OVH": return search_ovh_models(search_term) elif provider == "Cerebras": return search_cerebras_models(search_term) elif provider == "GoogleAI": return search_googleai_models(search_term) return None # Set up submission event def submit_message(message, history, provider, openrouter_model, openai_model, hf_model, groq_model, cohere_model, together_model, ovh_model, cerebras_model, googleai_model, temperature, max_tokens, top_p, frequency_penalty, presence_penalty, repetition_penalty, top_k, min_p, seed, top_a, stream_output, response_format, images, documents, reasoning_effort, system_message, transforms, openrouter_api_key, openai_api_key, hf_api_key, groq_api_key, cohere_api_key, together_api_key, googleai_api_key): """Submit message to selected provider and model""" # Get the currently selected model model_choice = get_current_model(provider, openrouter_model, openai_model, hf_model, groq_model, cohere_model, together_model, ovh_model, cerebras_model, googleai_model) # Check if model is selected if not model_choice: error_message = f"Error: No model selected for provider {provider}" return history + [ {"role": "user", "content": message}, {"role": "assistant", "content": error_message} ] # Select the appropriate API key based on the provider api_key_override = None if provider == "OpenRouter" and openrouter_api_key: api_key_override = openrouter_api_key elif provider == "OpenAI" and openai_api_key: api_key_override = openai_api_key elif provider == "HuggingFace" and hf_api_key: api_key_override = hf_api_key elif provider == "Groq" and groq_api_key: api_key_override = groq_api_key elif provider == "Cohere" and cohere_api_key: api_key_override = cohere_api_key elif provider == "Together" and together_api_key: api_key_override = together_api_key elif provider == "GoogleAI" and googleai_api_key: api_key_override = googleai_api_key # Call the ask_ai function with the appropriate parameters return ask_ai( message=message, history=history, provider=provider, model_choice=model_choice, temperature=temperature, max_tokens=max_tokens, top_p=top_p, frequency_penalty=frequency_penalty, presence_penalty=presence_penalty, repetition_penalty=repetition_penalty, top_k=top_k, min_p=min_p, seed=seed, top_a=top_a, stream_output=stream_output, response_format=response_format, images=images, documents=documents, reasoning_effort=reasoning_effort, system_message=system_message, transforms=transforms, api_key_override=api_key_override ) # Submit button click event submit_btn.click( fn=submit_message, inputs=[ message, chatbot, provider_choice, openrouter_model, openai_model, hf_model, groq_model, cohere_model, together_model, ovh_model, cerebras_model, googleai_model, temperature, max_tokens, top_p, frequency_penalty, presence_penalty, repetition_penalty, top_k, min_p, seed, top_a, stream_output, response_format, images, documents, reasoning_effort, system_message, transforms, openrouter_api_key, openai_api_key, hf_api_key, groq_api_key, cohere_api_key, together_api_key, googleai_api_key ], outputs=chatbot, show_progress="minimal", ).then( fn=lambda: "", # Clear message box after sending inputs=None, outputs=message ) # Also submit on Enter key message.submit( fn=submit_message, inputs=[ message, chatbot, provider_choice, openrouter_model, openai_model, hf_model, groq_model, cohere_model, together_model, ovh_model, cerebras_model, googleai_model, temperature, max_tokens, top_p, frequency_penalty, presence_penalty, repetition_penalty, top_k, min_p, seed, top_a, stream_output, response_format, images, documents, reasoning_effort, system_message, transforms, openrouter_api_key, openai_api_key, hf_api_key, groq_api_key, cohere_api_key, together_api_key, googleai_api_key ], outputs=chatbot, show_progress="minimal", ).then( fn=lambda: "", # Clear message box after sending inputs=None, outputs=message ) # Clear chat button clear_btn.click( fn=clear_chat, inputs=[], outputs=[ chatbot, message, images, documents, temperature, max_tokens, top_p, frequency_penalty, presence_penalty, repetition_penalty, top_k, min_p, seed, top_a, stream_output, response_format, reasoning_effort, system_message, transforms ] ) return demo # Launch the app if __name__ == "__main__": # Check API keys and print status missing_keys = [] if not OPENROUTER_API_KEY: logger.warning("WARNING: OPENROUTER_API_KEY environment variable is not set") missing_keys.append("OpenRouter") if not OPENAI_API_KEY: logger.warning("WARNING: OPENAI_API_KEY environment variable is not set") missing_keys.append("OpenAI") if not GROQ_API_KEY: logger.warning("WARNING: GROQ_API_KEY environment variable is not set") missing_keys.append("Groq") if not COHERE_API_KEY: logger.warning("WARNING: COHERE_API_KEY environment variable is not set") missing_keys.append("Cohere") if not TOGETHER_API_KEY: logger.warning("WARNING: TOGETHER_API_KEY environment variable is not set") missing_keys.append("Together") if not GOOGLEAI_API_KEY: logger.warning("WARNING: GOOGLEAI_API_KEY environment variable is not set") missing_keys.append("GoogleAI") if missing_keys: print("Missing API keys for the following providers:") for key in missing_keys: print(f"- {key}") print("\nYou can still use the application, but some providers will require API keys.") print("You can provide API keys through environment variables or use the API Key Override field.") if "OpenRouter" in missing_keys: print("\nNote: OpenRouter offers free tier access to many models!") if "OVH" not in missing_keys and "Cerebras" not in missing_keys: print("\nNote: OVH AI Endpoints (beta) and Cerebras offer free usage tiers!") print("\nStarting CrispChat application...") demo = create_app() demo.launch( server_name="0.0.0.0", server_port=7860, debug=True, show_error=True )