# CrispChat / app.py
# Source: Hugging Face Space (user: cstr), commit 453c62c (verified), ~34.3 kB
import os
import gradio as gr
import requests
import json
import base64
import logging
import io

# Configure logging for the whole module.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Gracefully import optional libraries with fallbacks: when a dependency is
# missing the app still starts, and the corresponding feature degrades
# (the sentinel None is checked before use elsewhere in this file).
try:
    from PIL import Image
except ImportError:
    logger.warning("PIL not installed. Image processing will be limited.")
    Image = None
try:
    import PyPDF2
except ImportError:
    logger.warning("PyPDF2 not installed. PDF processing will be limited.")
    PyPDF2 = None
try:
    import markdown
except ImportError:
    logger.warning("Markdown not installed. Markdown processing will be limited.")
    markdown = None

# API key for OpenRouter, read from the environment (empty string if unset).
OPENROUTER_API_KEY = os.environ.get("OPENROUTER_API_KEY", "")
# Complete model list with context sizes - as per requested list.
# Each entry is a (display_name, openrouter_model_id, context_window_tokens) tuple.
MODELS = [
    # 1M+ Context Models
    {"category": "1M+ Context", "models": [
        ("Google: Gemini Pro 2.0 Experimental", "google/gemini-2.0-pro-exp-02-05:free", 2000000),
        ("Google: Gemini 2.0 Flash Thinking Experimental 01-21", "google/gemini-2.0-flash-thinking-exp:free", 1048576),
        ("Google: Gemini Flash 2.0 Experimental", "google/gemini-2.0-flash-exp:free", 1048576),
        ("Google: Gemini Pro 2.5 Experimental", "google/gemini-2.5-pro-exp-03-25:free", 1000000),
        ("Google: Gemini Flash 1.5 8B Experimental", "google/gemini-flash-1.5-8b-exp", 1000000),
    ]},
    # 100K-1M Context Models
    {"category": "100K+ Context", "models": [
        ("DeepSeek: DeepSeek R1 Zero", "deepseek/deepseek-r1-zero:free", 163840),
        ("DeepSeek: R1", "deepseek/deepseek-r1:free", 163840),
        ("DeepSeek: DeepSeek V3 Base", "deepseek/deepseek-v3-base:free", 131072),
        ("DeepSeek: DeepSeek V3 0324", "deepseek/deepseek-chat-v3-0324:free", 131072),
        ("Google: Gemma 3 4B", "google/gemma-3-4b-it:free", 131072),
        ("Google: Gemma 3 12B", "google/gemma-3-12b-it:free", 131072),
        ("Nous: DeepHermes 3 Llama 3 8B Preview", "nousresearch/deephermes-3-llama-3-8b-preview:free", 131072),
        ("Qwen: Qwen2.5 VL 72B Instruct", "qwen/qwen2.5-vl-72b-instruct:free", 131072),
        ("DeepSeek: DeepSeek V3", "deepseek/deepseek-chat:free", 131072),
        ("NVIDIA: Llama 3.1 Nemotron 70B Instruct", "nvidia/llama-3.1-nemotron-70b-instruct:free", 131072),
        ("Meta: Llama 3.2 1B Instruct", "meta-llama/llama-3.2-1b-instruct:free", 131072),
        ("Meta: Llama 3.2 11B Vision Instruct", "meta-llama/llama-3.2-11b-vision-instruct:free", 131072),
        ("Meta: Llama 3.1 8B Instruct", "meta-llama/llama-3.1-8b-instruct:free", 131072),
        ("Mistral: Mistral Nemo", "mistralai/mistral-nemo:free", 128000),
    ]},
    # 64K-100K Context Models
    {"category": "64K-100K Context", "models": [
        ("Mistral: Mistral Small 3.1 24B", "mistralai/mistral-small-3.1-24b-instruct:free", 96000),
        ("Google: Gemma 3 27B", "google/gemma-3-27b-it:free", 96000),
        ("Qwen: Qwen2.5 VL 3B Instruct", "qwen/qwen2.5-vl-3b-instruct:free", 64000),
        ("DeepSeek: R1 Distill Qwen 14B", "deepseek/deepseek-r1-distill-qwen-14b:free", 64000),
        ("Qwen: Qwen2.5-VL 7B Instruct", "qwen/qwen-2.5-vl-7b-instruct:free", 64000),
    ]},
    # 32K-64K Context Models
    {"category": "32K-64K Context", "models": [
        ("Google: LearnLM 1.5 Pro Experimental", "google/learnlm-1.5-pro-experimental:free", 40960),
        ("Qwen: QwQ 32B", "qwen/qwq-32b:free", 40000),
        ("Google: Gemini 2.0 Flash Thinking Experimental", "google/gemini-2.0-flash-thinking-exp-1219:free", 40000),
        ("Bytedance: UI-TARS 72B", "bytedance-research/ui-tars-72b:free", 32768),
        ("Qwerky 72b", "featherless/qwerky-72b:free", 32768),
        ("OlympicCoder 7B", "open-r1/olympiccoder-7b:free", 32768),
        ("OlympicCoder 32B", "open-r1/olympiccoder-32b:free", 32768),
        ("Google: Gemma 3 1B", "google/gemma-3-1b-it:free", 32768),
        ("Reka: Flash 3", "rekaai/reka-flash-3:free", 32768),
        ("Dolphin3.0 R1 Mistral 24B", "cognitivecomputations/dolphin3.0-r1-mistral-24b:free", 32768),
        ("Dolphin3.0 Mistral 24B", "cognitivecomputations/dolphin3.0-mistral-24b:free", 32768),
        ("Mistral: Mistral Small 3", "mistralai/mistral-small-24b-instruct-2501:free", 32768),
        ("Qwen2.5 Coder 32B Instruct", "qwen/qwen-2.5-coder-32b-instruct:free", 32768),
        ("Qwen2.5 72B Instruct", "qwen/qwen-2.5-72b-instruct:free", 32768),
    ]},
    # 8K-32K Context Models
    {"category": "8K-32K Context", "models": [
        ("Meta: Llama 3.2 3B Instruct", "meta-llama/llama-3.2-3b-instruct:free", 20000),
        ("Qwen: QwQ 32B Preview", "qwen/qwq-32b-preview:free", 16384),
        ("DeepSeek: R1 Distill Qwen 32B", "deepseek/deepseek-r1-distill-qwen-32b:free", 16000),
        ("Qwen: Qwen2.5 VL 32B Instruct", "qwen/qwen2.5-vl-32b-instruct:free", 8192),
        ("Moonshot AI: Moonlight 16B A3B Instruct", "moonshotai/moonlight-16b-a3b-instruct:free", 8192),
        ("DeepSeek: R1 Distill Llama 70B", "deepseek/deepseek-r1-distill-llama-70b:free", 8192),
        ("Qwen 2 7B Instruct", "qwen/qwen-2-7b-instruct:free", 8192),
        ("Google: Gemma 2 9B", "google/gemma-2-9b-it:free", 8192),
        ("Mistral: Mistral 7B Instruct", "mistralai/mistral-7b-instruct:free", 8192),
        ("Microsoft: Phi-3 Mini 128K Instruct", "microsoft/phi-3-mini-128k-instruct:free", 8192),
        ("Microsoft: Phi-3 Medium 128K Instruct", "microsoft/phi-3-medium-128k-instruct:free", 8192),
        ("Meta: Llama 3 8B Instruct", "meta-llama/llama-3-8b-instruct:free", 8192),
        ("OpenChat 3.5 7B", "openchat/openchat-7b:free", 8192),
        ("Meta: Llama 3.3 70B Instruct", "meta-llama/llama-3.3-70b-instruct:free", 8000),
    ]},
    # <8K Context Models
    {"category": "4K Context", "models": [
        ("AllenAI: Molmo 7B D", "allenai/molmo-7b-d:free", 4096),
        ("Rogue Rose 103B v0.2", "sophosympatheia/rogue-rose-103b-v0.2:free", 4096),
        ("Toppy M 7B", "undi95/toppy-m-7b:free", 4096),
        ("Hugging Face: Zephyr 7B", "huggingfaceh4/zephyr-7b-beta:free", 4096),
        ("MythoMax 13B", "gryphe/mythomax-l2-13b:free", 4096),
    ]},
    # Vision-capable Models (intentionally repeats entries from above)
    {"category": "Vision Models", "models": [
        ("Meta: Llama 3.2 11B Vision Instruct", "meta-llama/llama-3.2-11b-vision-instruct:free", 131072),
        ("Qwen: Qwen2.5 VL 72B Instruct", "qwen/qwen2.5-vl-72b-instruct:free", 131072),
        ("Qwen: Qwen2.5 VL 32B Instruct", "qwen/qwen2.5-vl-32b-instruct:free", 8192),
        ("Qwen: Qwen2.5-VL 7B Instruct", "qwen/qwen-2.5-vl-7b-instruct:free", 64000),
        ("Qwen: Qwen2.5 VL 3B Instruct", "qwen/qwen2.5-vl-3b-instruct:free", 64000),
        ("Google: Gemini Pro 2.0 Experimental", "google/gemini-2.0-pro-exp-02-05:free", 2000000),
        ("Google: Gemini Pro 2.5 Experimental", "google/gemini-2.5-pro-exp-03-25:free", 1000000),
        ("Google: Gemini 2.0 Flash Thinking Experimental", "google/gemini-2.0-flash-thinking-exp:free", 1048576),
        ("Google: Gemini Flash 2.0 Experimental", "google/gemini-2.0-flash-exp:free", 1048576),
        ("AllenAI: Molmo 7B D", "allenai/molmo-7b-d:free", 4096),
    ]},
]

# Flatten the categorized list for easy searching, dropping duplicate tuples
# (vision models are listed in two categories). A seen-set gives O(1)
# membership tests instead of the original O(n) list scan; insertion order
# is preserved, so ALL_MODELS is identical to before.
ALL_MODELS = []
_seen_models = set()
for category in MODELS:
    for model in category["models"]:
        if model not in _seen_models:
            _seen_models.add(model)
            ALL_MODELS.append(model)
def format_to_message_dict(history):
    """Convert [user, assistant] history pairs into role/content message dicts.

    Pairs that are not length 2 are skipped; empty/None sides are omitted.
    """
    messages = []
    for pair in history:
        if len(pair) != 2:
            continue
        user_text, assistant_text = pair
        if user_text:
            messages.append({"role": "user", "content": user_text})
        if assistant_text:
            messages.append({"role": "assistant", "content": assistant_text})
    return messages
def encode_image_to_base64(image_path):
    """Encode an image as a base64 data URL.

    Accepts either a filesystem path string (MIME type inferred from the
    extension) or a PIL-style image object (serialized as PNG in memory).
    Returns None on any failure.
    """
    try:
        if isinstance(image_path, str):  # File path as string
            with open(image_path, "rb") as handle:
                b64_data = base64.b64encode(handle.read()).decode('utf-8')
            extension = image_path.split('.')[-1].lower()
            if extension in ("jpg", "jpeg"):
                mime_type = "image/jpeg"
            elif extension == "png":
                mime_type = "image/png"
            elif extension == "webp":
                mime_type = "image/webp"
            else:
                # Fall back to "image/<ext>" for anything else.
                mime_type = f"image/{extension}"
            return f"data:{mime_type};base64,{b64_data}"
        # Pillow Image or file-like object with a .save() method.
        if Image is None:
            logger.error("PIL is not installed, cannot process image object")
            return None
        buffer = io.BytesIO()
        image_path.save(buffer, format="PNG")
        b64_data = base64.b64encode(buffer.getvalue()).decode('utf-8')
        return f"data:image/png;base64,{b64_data}"
    except Exception as e:
        logger.error(f"Error encoding image: {str(e)}")
        return None
def extract_text_from_file(file_path):
    """Extract plain text from a PDF, Markdown, or TXT file path.

    Returns a descriptive string (not an exception) for unsupported types,
    missing optional dependencies, or read errors.
    """
    try:
        extension = file_path.split('.')[-1].lower()
        if extension == 'pdf':
            if PyPDF2 is None:
                return "PDF processing is not available (PyPDF2 not installed)"
            # Concatenate every page's text, separated by blank lines.
            pieces = []
            with open(file_path, 'rb') as handle:
                reader = PyPDF2.PdfReader(handle)
                for page in reader.pages:
                    pieces.append(page.extract_text() + "\n\n")
            return "".join(pieces)
        if extension in ('md', 'txt'):
            # Both Markdown and plain text are returned verbatim.
            with open(file_path, 'r', encoding='utf-8') as handle:
                return handle.read()
        return f"Unsupported file type: {extension}"
    except Exception as e:
        logger.error(f"Error extracting text from file: {str(e)}")
        return f"Error processing file: {str(e)}"
def prepare_message_with_media(text, images=None, documents=None):
    """Build a message payload from text plus optional images and documents.

    Returns the plain text string when no media is supplied. Document
    contents are appended to the text; when images are present the result
    is a multimodal content list of {"type": ...} parts for the API.
    """
    # Fast path: plain text message only.
    if not images and not documents:
        return text
    # Append extracted document text to the prompt.
    if documents:
        document_texts = []
        for doc in documents:
            if doc is None:
                continue
            doc_text = extract_text_from_file(doc)
            if doc_text:
                document_texts.append(doc_text)
        if document_texts:
            if not text:
                # Fix: include a separator so the default prompt does not run
                # directly into the first document's content.
                text = "Please analyze these documents:\n\n"
            else:
                text = f"{text}\n\nDocument content:\n\n"
            text += "\n\n".join(document_texts)
    # No images: a plain string message is sufficient.
    if not images:
        return text
    # Multimodal content: text part first, then one image_url part per image.
    content = [{"type": "text", "text": text}]
    for img in images:
        if img is None:
            continue
        encoded_image = encode_image_to_base64(img)
        if encoded_image:
            content.append({
                "type": "image_url",
                "image_url": {"url": encoded_image}
            })
    return content
def filter_models(search_term):
    """Filter the model dropdown by a case-insensitive substring search.

    Returns a Gradio update dict. Fix: ``gr.Dropdown.update`` was removed in
    Gradio 4.x (the file pins gradio>=4.44.1), so use ``gr.update`` instead.
    Falls back to the full list when the search matches nothing.
    """
    all_names = [model[0] for model in ALL_MODELS]
    if not search_term:
        return gr.update(choices=all_names, value=all_names[0])
    filtered_models = [name for name in all_names if search_term.lower() in name.lower()]
    if filtered_models:
        return gr.update(choices=filtered_models, value=filtered_models[0])
    return gr.update(choices=all_names, value=all_names[0])
def get_model_info(model_name):
    """Return the (name, model_id, context_size) tuple for a display name.

    Returns None when the name is not in ALL_MODELS.
    """
    return next((entry for entry in ALL_MODELS if entry[0] == model_name), None)
def update_context_display(model_name):
    """Return a human-readable context size (e.g. "131,072 tokens") for a model."""
    model_info = get_model_info(model_name)
    if model_info is None:
        return "Unknown"
    _, _, context_size = model_info
    return f"{context_size:,} tokens"
def update_category_models(category):
    """Refresh the per-category model radio when the category selection changes.

    Returns a Gradio update dict. Fix: ``gr.Radio.update`` was removed in
    Gradio 4.x (the file pins gradio>=4.44.1), so use ``gr.update`` instead.
    """
    for cat in MODELS:
        if cat["category"] == category:
            names = [model[0] for model in cat["models"]]
            return gr.update(choices=names, value=names[0])
    # Unknown category: clear the radio.
    return gr.update(choices=[], value=None)
def ask_ai(message, chatbot, model_choice, temperature, max_tokens, top_p,
           frequency_penalty, presence_penalty, repetition_penalty, top_k,
           min_p, seed, top_a, stream_output, response_format,
           images, documents, reasoning_effort, system_message, transforms):
    """Send the conversation to OpenRouter and yield (chatbot, message) updates.

    This function contains a ``yield`` (for the streaming path), which makes
    it a generator. Fix: a ``return value`` inside a generator is discarded
    by Gradio's generator handling, so the original ``return chatbot, ""``
    statements silently dropped all non-streaming responses and error
    messages. Every exit path now *yields* its result instead.
    """
    # Nothing to send: leave the history unchanged and clear nothing.
    if not message.strip() and not images and not documents:
        yield chatbot, ""
        return

    # Resolve the display name to an OpenRouter model id and context size.
    model_id = None
    context_size = 0
    for name, model_id_value, ctx_size in ALL_MODELS:
        if name == model_choice:
            model_id = model_id_value
            context_size = ctx_size
            break
    if model_id is None:
        logger.error(f"Model not found: {model_choice}")
        yield chatbot + [[message, "Error: Model not found"]], ""
        return

    # Convert the existing history into API message dicts.
    messages = format_to_message_dict(chatbot)

    # Insert the system message, removing any existing one so ours wins.
    if system_message and system_message.strip():
        for i, msg in enumerate(messages):
            if msg.get("role") == "system":
                messages.pop(i)
                break
        messages.insert(0, {"role": "system", "content": system_message.strip()})

    # Attach images/documents to the current user message if provided.
    content = prepare_message_with_media(message, images, documents)
    messages.append({"role": "user", "content": content})

    try:
        logger.info(f"Sending request to model: {model_id}")
        # Sentinel values (repetition_penalty == 1.0, zeros) map to None and
        # are stripped below so OpenRouter applies its own defaults.
        payload = {
            "model": model_id,
            "messages": messages,
            "temperature": temperature,
            "max_tokens": max_tokens,
            "top_p": top_p,
            "frequency_penalty": frequency_penalty,
            "presence_penalty": presence_penalty,
            "repetition_penalty": repetition_penalty if repetition_penalty != 1.0 else None,
            "top_k": top_k,
            "min_p": min_p if min_p > 0 else None,
            "seed": seed if seed > 0 else None,
            "top_a": top_a if top_a > 0 else None,
            "stream": stream_output
        }
        # Optional request features.
        if response_format == "json_object":
            payload["response_format"] = {"type": "json_object"}
        if reasoning_effort != "none":
            payload["reasoning"] = {"effort": reasoning_effort}
        if transforms:
            payload["transforms"] = transforms
        # Remove None values so defaults apply server-side.
        payload = {k: v for k, v in payload.items() if v is not None}
        logger.info(f"Request payload: {json.dumps(payload, default=str)}")

        response = requests.post(
            "https://openrouter.ai/api/v1/chat/completions",
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {OPENROUTER_API_KEY}",
                "HTTP-Referer": "https://huggingface.co/spaces"
            },
            json=payload,
            timeout=180,  # Longer timeout for document processing and streaming
            stream=stream_output
        )
        logger.info(f"Response status: {response.status_code}")

        if stream_output and response.status_code == 200:
            # Server-sent events: accumulate deltas into the last chat turn,
            # yielding after each chunk so the UI updates live.
            chatbot = chatbot + [[message, ""]]
            for line in response.iter_lines():
                if not line:
                    continue
                line = line.decode('utf-8')
                if not line.startswith('data: '):
                    continue
                data = line[6:]
                if data.strip() == '[DONE]':
                    break
                try:
                    chunk = json.loads(data)
                    if "choices" in chunk and len(chunk["choices"]) > 0:
                        delta = chunk["choices"][0].get("delta", {})
                        if "content" in delta and delta["content"]:
                            chatbot[-1][1] += delta["content"]
                            yield chatbot, ""
                except json.JSONDecodeError:
                    # Ignore malformed keep-alive / partial lines.
                    continue
            yield chatbot, ""
            return
        elif response.status_code == 200:
            # Non-streaming success: append the full assistant reply.
            result = response.json()
            ai_response = result.get("choices", [{}])[0].get("message", {}).get("content", "")
            chatbot = chatbot + [[message, ai_response]]
            if "usage" in result:
                logger.info(f"Token usage: {result['usage']}")
        else:
            # API error: surface status and body in the chat.
            response_text = response.text
            logger.info(f"Error response body: {response_text}")
            error_message = f"Error: Status code {response.status_code}\n\nResponse: {response_text}"
            chatbot = chatbot + [[message, error_message]]
    except Exception as e:
        logger.error(f"Exception during API call: {str(e)}")
        chatbot = chatbot + [[message, f"Error: {str(e)}"]]

    # Final update for the non-streaming, error, and exception paths.
    yield chatbot, ""
def process_uploaded_images(files):
    """Return the filesystem path (.name) of each uploaded file for the gallery."""
    paths = []
    for uploaded in files:
        paths.append(uploaded.name)
    return paths
def clear_chat():
    """Return the default value for every UI input, resetting the interface."""
    return (
        [],         # chatbot history
        "",         # message box
        [],         # images
        [],         # documents
        0.7,        # temperature
        1000,       # max_tokens
        0.8,        # top_p
        0.0,        # frequency_penalty
        0.0,        # presence_penalty
        1.0,        # repetition_penalty
        40,         # top_k
        0.1,        # min_p
        0,          # seed
        0.0,        # top_a
        False,      # stream_output
        "default",  # response_format
        "none",     # reasoning_effort
        "",         # system_message
        [],         # transforms
    )
# Create requirements.txt content.
# NOTE(review): this string is defined but never written to disk in the code
# visible here — presumably kept as a reference for the Space's
# requirements.txt; confirm before removing.
requirements = """
gradio>=4.44.1
requests>=2.28.1
Pillow>=9.0.0
PyPDF2>=3.0.0
markdown>=3.4.1
"""
# Main application
def create_app():
    """Build the Gradio Blocks UI and wire all event handlers; return the demo."""
    # Custom CSS: compact context label, hidden Gradio footer, and layout
    # helpers for the model-selection row and parameter grid.
    with gr.Blocks(css="""
.context-size {
font-size: 0.9em;
color: #666;
margin-left: 10px;
}
footer { display: none !important; }
.model-selection-row {
display: flex;
align-items: center;
}
.parameter-grid {
display: grid;
grid-template-columns: 1fr 1fr;
gap: 10px;
}
""") as demo:
        # App header.
        gr.Markdown("""
# CrispChat
Chat with various AI models from OpenRouter with support for images and documents.
""")
        with gr.Row():
            # Left column: conversation area, message input, and upload widgets.
            with gr.Column(scale=2):
                # NOTE(review): type="messages" expects {"role", "content"}
                # dicts, but ask_ai and clear_chat build [user, assistant]
                # pair lists — confirm the history format actually matches
                # this setting before relying on it.
                chatbot = gr.Chatbot(
                    height=500,
                    show_copy_button=True,
                    show_label=False,
                    avatar_images=(None, "https://upload.wikimedia.org/wikipedia/commons/0/04/ChatGPT_logo.svg"),
                    type="messages"  # Fixed: Use messages format instead of tuples
                )
                with gr.Row():
                    message = gr.Textbox(
                        placeholder="Type your message here...",
                        label="Message",
                        lines=2
                    )
                with gr.Row():
                    with gr.Column(scale=3):
                        submit_btn = gr.Button("Send", variant="primary")
                    with gr.Column(scale=1):
                        clear_btn = gr.Button("Clear Chat", variant="secondary")
                with gr.Row():
                    # Image upload (for vision-capable models).
                    with gr.Accordion("Upload Images (for vision models)", open=False):
                        images = gr.Gallery(
                            label="Uploaded Images",
                            show_label=True,
                            columns=4,
                            height="auto",
                            object_fit="contain"
                        )
                        image_upload_btn = gr.UploadButton(
                            label="Upload Images",
                            file_types=["image"],
                            file_count="multiple"
                        )
                    # Document upload (text is extracted and appended to the prompt).
                    with gr.Accordion("Upload Documents (PDF, MD, TXT)", open=False):
                        documents = gr.File(
                            label="Uploaded Documents",
                            file_types=[".pdf", ".md", ".txt"],
                            file_count="multiple"
                        )
            # Right column: model selection and generation controls.
            with gr.Column(scale=1):
                with gr.Group():
                    gr.Markdown("### Model Selection")
                    with gr.Row(elem_classes="model-selection-row"):
                        model_search = gr.Textbox(
                            placeholder="Search models...",
                            label="",
                            show_label=False
                        )
                    with gr.Row(elem_classes="model-selection-row"):
                        model_choice = gr.Dropdown(
                            [model[0] for model in ALL_MODELS],
                            value=ALL_MODELS[0][0],
                            label="Model"
                        )
                        # Read-only display of the selected model's context window.
                        context_display = gr.Textbox(
                            value=update_context_display(ALL_MODELS[0][0]),
                            label="Context",
                            interactive=False,
                            elem_classes="context-size"
                        )
                # Model category selection.
                with gr.Accordion("Browse by Category", open=False):
                    model_categories = gr.Radio(
                        [category["category"] for category in MODELS],
                        label="Categories",
                        value=MODELS[0]["category"]
                    )
                    category_models = gr.Radio(
                        [model[0] for model in MODELS[0]["models"]],
                        label="Models in Category"
                    )
                # Core sampling parameters.
                with gr.Accordion("Generation Parameters", open=False):
                    with gr.Group(elem_classes="parameter-grid"):
                        temperature = gr.Slider(
                            minimum=0.0,
                            maximum=2.0,
                            value=0.7,
                            step=0.1,
                            label="Temperature"
                        )
                        max_tokens = gr.Slider(
                            minimum=100,
                            maximum=4000,
                            value=1000,
                            step=100,
                            label="Max Tokens"
                        )
                        top_p = gr.Slider(
                            minimum=0.1,
                            maximum=1.0,
                            value=0.8,
                            step=0.1,
                            label="Top P"
                        )
                        frequency_penalty = gr.Slider(
                            minimum=-2.0,
                            maximum=2.0,
                            value=0.0,
                            step=0.1,
                            label="Frequency Penalty"
                        )
                        presence_penalty = gr.Slider(
                            minimum=-2.0,
                            maximum=2.0,
                            value=0.0,
                            step=0.1,
                            label="Presence Penalty"
                        )
                        reasoning_effort = gr.Radio(
                            ["none", "low", "medium", "high"],
                            value="none",
                            label="Reasoning Effort"
                        )
                # Less common sampling parameters and output options.
                with gr.Accordion("Advanced Options", open=False):
                    with gr.Row():
                        with gr.Column():
                            repetition_penalty = gr.Slider(
                                minimum=0.1,
                                maximum=2.0,
                                value=1.0,
                                step=0.1,
                                label="Repetition Penalty"
                            )
                            top_k = gr.Slider(
                                minimum=1,
                                maximum=100,
                                value=40,
                                step=1,
                                label="Top K"
                            )
                            min_p = gr.Slider(
                                minimum=0.0,
                                maximum=1.0,
                                value=0.1,
                                step=0.05,
                                label="Min P"
                            )
                        with gr.Column():
                            seed = gr.Number(
                                value=0,
                                label="Seed (0 for random)",
                                precision=0
                            )
                            top_a = gr.Slider(
                                minimum=0.0,
                                maximum=1.0,
                                value=0.0,
                                step=0.05,
                                label="Top A"
                            )
                            stream_output = gr.Checkbox(
                                label="Stream Output",
                                value=False
                            )
                    with gr.Row():
                        response_format = gr.Radio(
                            ["default", "json_object"],
                            value="default",
                            label="Response Format"
                        )
                        gr.Markdown("""
* **json_object**: Forces the model to respond with valid JSON only.
* Only available on certain models - check model support on OpenRouter.
""")
                # Custom instructing options.
                with gr.Accordion("Custom Instructions", open=False):
                    system_message = gr.Textbox(
                        placeholder="Enter a system message to guide the model's behavior...",
                        label="System Message",
                        lines=3
                    )
                    transforms = gr.CheckboxGroup(
                        ["prompt_optimize", "prompt_distill", "prompt_compress"],
                        label="Prompt Transforms (OpenRouter specific)"
                    )
                    gr.Markdown("""
* **prompt_optimize**: Improve prompt for better responses.
* **prompt_distill**: Compress prompt to use fewer tokens without changing meaning.
* **prompt_compress**: Aggressively compress prompt to fit larger contexts.
""")
                # Model information section (filled in by update_model_info below).
                with gr.Accordion("About Selected Model", open=False):
                    model_info_display = gr.HTML(
                        value="<p>Select a model to see details</p>"
                    )
                # Usage instructions.
                with gr.Accordion("Usage Instructions", open=False):
                    gr.Markdown("""
## Basic Usage
1. Type your message in the input box
2. Select a model from the dropdown
3. Click "Send" or press Enter
## Working with Files
- **Images**: Upload images to use with vision-capable models
- **Documents**: Upload PDF, Markdown, or text files to analyze their content
## Advanced Parameters
- **Temperature**: Controls randomness (higher = more creative, lower = more deterministic)
- **Max Tokens**: Maximum length of the response
- **Top P**: Nucleus sampling threshold (higher = consider more tokens)
- **Reasoning Effort**: Some models can show their reasoning process
## Tips
- For code generation, use models like Qwen Coder
- For visual tasks, choose vision-capable models
- For long context, check the context window size next to the model name
""")
                # Footer with version info.
                footer_md = gr.Markdown("""
---
### OpenRouter AI Chat Interface v1.0
Built with ❤️ using Gradio and OpenRouter API | Context sizes shown next to model names
""")
        # ---- Event wiring (must be inside the Blocks context) ----
        # Connect model search to dropdown filter.
        model_search.change(
            fn=filter_models,
            inputs=[model_search],
            outputs=[model_choice]
        )
        # Update context display when model changes.
        model_choice.change(
            fn=update_context_display,
            inputs=[model_choice],
            outputs=[context_display]
        )
        # Update model list when category changes.
        model_categories.change(
            fn=update_category_models,
            inputs=[model_categories],
            outputs=[category_models]
        )
        # Update main model choice when a category model is selected.
        category_models.change(
            fn=lambda x: x,
            inputs=[category_models],
            outputs=[model_choice]
        )
        # Process uploaded images into gallery entries.
        image_upload_btn.upload(
            fn=process_uploaded_images,
            inputs=[image_upload_btn],
            outputs=[images]
        )

        # Update model info card when the model changes.
        def update_model_info(model_name):
            """Render an HTML card describing the selected model."""
            model_info = get_model_info(model_name)
            if model_info:
                name, model_id, context_size = model_info
                return f"""
<div class="model-info">
<h3>{name}</h3>
<p><strong>Model ID:</strong> {model_id}</p>
<p><strong>Context Size:</strong> {context_size:,} tokens</p>
<p><strong>Provider:</strong> {model_id.split('/')[0]}</p>
</div>
"""
            return "<p>Model information not available</p>"

        model_choice.change(
            fn=update_model_info,
            inputs=[model_choice],
            outputs=[model_info_display]
        )
        # Set up events for the submit button.
        submit_btn.click(
            fn=ask_ai,
            inputs=[
                message, chatbot, model_choice, temperature, max_tokens,
                top_p, frequency_penalty, presence_penalty, repetition_penalty,
                top_k, min_p, seed, top_a, stream_output, response_format,
                images, documents, reasoning_effort, system_message, transforms
            ],
            outputs=[chatbot, message]
        )
        # Set up events for message submission (pressing Enter).
        message.submit(
            fn=ask_ai,
            inputs=[
                message, chatbot, model_choice, temperature, max_tokens,
                top_p, frequency_penalty, presence_penalty, repetition_penalty,
                top_k, min_p, seed, top_a, stream_output, response_format,
                images, documents, reasoning_effort, system_message, transforms
            ],
            outputs=[chatbot, message]
        )
        # Set up events for the clear button; outputs must match clear_chat's
        # return order exactly.
        clear_btn.click(
            fn=clear_chat,
            inputs=[],
            outputs=[
                chatbot, message, images, documents, temperature,
                max_tokens, top_p, frequency_penalty, presence_penalty,
                repetition_penalty, top_k, min_p, seed, top_a, stream_output,
                response_format, reasoning_effort, system_message, transforms
            ]
        )
    return demo
# Launch the app
if __name__ == "__main__":
    demo = create_app()
    # Bind to all interfaces on port 7860 (the Hugging Face Spaces default).
    demo.launch(server_name="0.0.0.0", server_port=7860)