# NOTE(review): the following metadata was Hugging Face Space page residue
# accidentally captured with the source (status banner "Spaces: Runtime error",
# file size 4,444 bytes, commit hashes 95b6b99/ce0b559, and the 1-143
# line-number gutter). Kept as a comment so the file parses as Python.
import gradio as gr
import pollinations as pl
import pandas as pd
import os
from datetime import datetime
from huggingface_hub import HfApi
import pyarrow.parquet as pq
import pyarrow as pa
import requests
# Initialize Pollinations text model
# Default backend model id; used until the user selects another in the UI.
default_model = "openai"

# Global chat model instance; rebuilt by change_model() / chatbot_response()
# whenever the user switches models in the dropdown.
model = pl.Text(
    model=default_model,
    system="You are a helpful AI assistant.",
    contextual=True,  # keep conversational context across calls
    seed="random",  # NOTE(review): presumably a pollinations sentinel for a random seed — confirm
    reasoning_effort="medium"
)

# Hugging Face setup
HF_TOKEN = os.getenv("HF_TOKEN")  # Set in HF Space secrets
REPO_ID = "kulia-moon/conza"  # dataset repo that receives the Parquet logs
api = HfApi()

# Store conversation history
# In-memory log of all exchanges; flushed to Parquet by save_conversation().
conversation_history = []
# Fetch available models
def fetch_models():
    """Fetch the list of available text models from Pollinations.

    Returns:
        tuple: ``(models_json, dropdown_update)`` — the raw JSON payload
        (or an ``{"error": ...}`` dict on failure) and a Gradio update that
        repopulates the model dropdown, falling back to only the default
        model when the request fails.
    """
    try:
        # Timeout added: requests.get() without one can hang the Space
        # forever if the API stalls.
        response = requests.get("https://text.pollinations.ai/models", timeout=10)
        response.raise_for_status()
        models = response.json()
        return models, gr.update(choices=[m["id"] for m in models], value=default_model)
    except Exception as e:
        # Best-effort: surface the error in the JSON panel and keep the
        # dropdown usable with the default model.
        return {"error": f"Failed to fetch models: {e}"}, gr.update(choices=[default_model], value=default_model)
def change_model(selected_model):
    """Replace the global chat model with *selected_model* and report the switch."""
    global model
    settings = {
        "model": selected_model,
        "system": "You are a helpful AI assistant.",
        "contextual": True,
        "seed": "random",
        "reasoning_effort": "medium",
    }
    model = pl.Text(**settings)
    return f"Switched to model: {selected_model}"
def chatbot_response(user_message, history, selected_model):
    """Generate a streamed reply for *user_message* and persist the exchange.

    Args:
        user_message: the user's latest message text.
        history: Gradio chat history (list of [user, bot] pairs; the last
            entry is expected to be the pending ``[user_message, None]``
            appended by ``user_input``).
        selected_model: model id currently chosen in the dropdown.

    Returns:
        The updated chat history with the bot's reply filled in.
    """
    global conversation_history, model
    # Rebuild the model only when the dropdown selection changed.
    if model.model != selected_model:
        model = pl.Text(
            model=selected_model,
            system="You are a helpful AI assistant.",
            contextual=True,
            seed="random",
            reasoning_effort="medium"
        )
    # Stream the reply; a per-message seed keeps successive answers varied.
    seed = int(datetime.now().timestamp())
    response = ""
    for token in model(user_message, stream=True, seed=seed):
        response += token
    # Append to the persistent log with timestamp and model info.
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    conversation_history.append({
        "timestamp": timestamp,
        "user_message": user_message,
        "bot_response": response,
        "model": selected_model
    })
    # BUG FIX: user_input() has already appended [user_message, None]; the
    # original appended a second (user_message, response) pair, duplicating
    # every user turn in the chat window. Fill the pending slot instead,
    # appending only if no pending entry exists.
    if history and history[-1][0] == user_message and history[-1][1] is None:
        history[-1][1] = response
    else:
        history.append((user_message, response))
    # Save to Parquet and push to HF.
    save_conversation()
    return history
def save_conversation():
    """Write conversation_history to a timestamped Parquet file and upload it
    to the Hugging Face dataset repo; no-op when the history is empty.

    Best-effort: upload failures are printed, and the local file is removed
    only after a successful upload.
    """
    if not conversation_history:
        return
    # Convert to DataFrame
    df = pd.DataFrame(conversation_history)
    # Generate filename with timestamp
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = f"conversation_{timestamp}.parquet"
    # Save to Parquet
    table = pa.Table.from_pandas(df)
    pq.write_table(table, filename)
    # Push to Hugging Face
    try:
        api.upload_file(
            path_or_fileobj=filename,
            # BUG FIX: path_in_repo was the literal "data/(unknown)" (an
            # f-string with no placeholder), so every upload overwrote the
            # same repo file. Use the timestamped filename instead.
            path_in_repo=f"data/{filename}",
            repo_id=REPO_ID,
            repo_type="dataset",
            token=HF_TOKEN
        )
        os.remove(filename)  # Clean up local file only after success
    except Exception as e:
        print(f"Error uploading to Hugging Face: {e}")
# Create Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Pollinations AI Chatbot")
    with gr.Row():
        with gr.Column(scale=3):
            chatbot = gr.Chatbot()
            msg = gr.Textbox(placeholder="Type your message...")
            model_selector = gr.Dropdown(label="Select Model", choices=[default_model])
            change_model_btn = gr.Button("Change Model")
            clear = gr.Button("Clear")
        with gr.Column(scale=1):
            models_output = gr.JSON(label="Pollinations Models")
            # Shows the confirmation string returned by change_model().
            model_status = gr.Textbox(label="Model Status", interactive=False)

    def user_input(user_message, history):
        """Echo the user's turn into the chat with a pending bot slot."""
        return "", history + [[user_message, None]]

    def bot_response(history, selected_model):
        """Fill the pending bot slot for the most recent user message."""
        if not history or not history[-1][0]:
            return history
        user_message = history[-1][0]
        return chatbot_response(user_message, history, selected_model)

    # BUG FIX: the original called fetch_models() at build time and rebound
    # the component variables (models_output, model_selector) to its raw
    # return values, breaking every event wiring below. Run it as a load
    # event so the components themselves get populated instead.
    demo.load(fetch_models, None, [models_output, model_selector])

    # Event handlers
    msg.submit(user_input, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot_response, [chatbot, model_selector], chatbot
    )
    # Route the status string to a visible textbox; the original sent it to
    # a detached gr.State(), so the user never saw the confirmation.
    change_model_btn.click(change_model, model_selector, model_status)
    # BUG FIX: msg is a Textbox — reset it to "" (the original returned [],
    # which is not a valid textbox value).
    clear.click(lambda: ([], ""), None, [chatbot, msg], queue=False)

# Launch the demo
demo.queue().launch()