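"""Minimal Gradio chat app: sends the conversation to OpenRouter's chat
completions API and shows replies from a small list of free models."""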
import os

import gradio as gr
import requests
# API key
OPENROUTER_API_KEY = os.environ.get("OPENROUTER_API_KEY", "")
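# The key is read only from the environment; on Hugging Face Spaces it can be
# stored as a repository secret so it is never hard-coded in the source.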
# Basic model list
MODELS = [
    ("Gemini Pro 2.0", "google/gemini-2.0-pro-exp-02-05:free"),
    ("Llama 3.2 Vision", "meta-llama/llama-3.2-11b-vision-instruct:free"),
]
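# The ":free" suffix marks OpenRouter's free-tier variants of these models;
# the exact IDs come from OpenRouter's catalog and may change over time.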

def format_to_message_dict(history):
    """Convert chatbot history pairs into OpenAI-style role/content messages."""
    messages = []
    for pair in history:
        if len(pair) == 2:
            human, ai = pair
            if human:
                messages.append({"role": "user", "content": human})
            if ai:
                messages.append({"role": "assistant", "content": ai})
    return messages

def ask_ai(message, chatbot, model_choice):
    """Send the conversation to OpenRouter and append the reply to the history."""
    if not message.strip():
        return chatbot, ""

    # Resolve the selected display name to its model ID (default: first model)
    model_id = MODELS[0][1]
    for name, model_id_value in MODELS:
        if name == model_choice:
            model_id = model_id_value
            break

    # Rebuild the message list from the chatbot history, then add the new message
    messages = format_to_message_dict(chatbot)
    messages.append({"role": "user", "content": message})

    # Call the OpenRouter chat completions API
    try:
        response = requests.post(
            "https://openrouter.ai/api/v1/chat/completions",
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {OPENROUTER_API_KEY}",
                "HTTP-Referer": "https://huggingface.co/spaces",
            },
            json={
                "model": model_id,
                "messages": messages,
                "temperature": 0.7,
                "max_tokens": 1000,
            },
            timeout=60,
        )
        if response.status_code == 200:
            result = response.json()
            # Parse defensively in case the response shape is unexpected
            ai_response = result.get("choices", [{}])[0].get("message", {}).get("content", "")
            chatbot = chatbot + [[message, ai_response]]
        else:
            chatbot = chatbot + [[message, f"Error: Status code {response.status_code}"]]
    except Exception as e:
        chatbot = chatbot + [[message, f"Error: {str(e)}"]]
    return chatbot, ""

def clear_chat():
    """Reset the conversation history and the input box."""
    return [], ""

# Create minimal interface
with gr.Blocks() as demo:
    gr.Markdown("# Simple AI Chat")
    chatbot = gr.Chatbot(height=400)
    with gr.Row():
        message = gr.Textbox(
            placeholder="Type your message here...",
            label="Message",
            lines=2,
        )
    with gr.Row():
        model_choice = gr.Radio(
            [name for name, _ in MODELS],
            value=MODELS[0][0],
            label="Model",
        )
    with gr.Row():
        submit_btn = gr.Button("Send")
        clear_btn = gr.Button("Clear Chat")

    # Set up events
    submit_btn.click(
        fn=ask_ai,
        inputs=[message, chatbot, model_choice],
        outputs=[chatbot, message],
    )
    message.submit(
        fn=ask_ai,
        inputs=[message, chatbot, model_choice],
        outputs=[chatbot, message],
    )
    clear_btn.click(
        fn=clear_chat,
        inputs=[],
        outputs=[chatbot, message],
    )

# Launch directly with Gradio's built-in server
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)
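    # Binding to 0.0.0.0 makes the app reachable from outside the container,
    # which hosted environments require; 7860 is Gradio's default port.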