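"""Gradio chatbot for Hugging Face Spaces.

Streams replies from a Pollinations text model and logs each exchange as a
Parquet file pushed to the kulia-moon/conza dataset repo on the Hugging Face Hub.
"""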
import gradio as gr
import pollinations as pl
import pandas as pd
import os
from datetime import datetime
from huggingface_hub import HfApi
import pyarrow.parquet as pq
import pyarrow as pa
import requests

# Initialize Pollinations text model
default_model = "openai"
model = pl.Text(
    model=default_model,
    system="You are a helpful AI assistant.",
    contextual=True,
    seed="random",
    reasoning_effort="medium"
)

# Hugging Face setup
HF_TOKEN = os.getenv("HF_TOKEN")  # Set in HF Space secrets
REPO_ID = "kulia-moon/conza"
api = HfApi()

# Store conversation history
conversation_history = []
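# Note: this list is module-level state, so in a multi-user Space every
# visitor's turns are appended to the same history and exported together.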

# Fetch available models
def fetch_models():
    try:
        response = requests.get("https://text.pollinations.ai/models")
        response.raise_for_status()
        models = response.json()
        return models, gr.update(choices=[m["id"] for m in models], value=default_model)
    except Exception as e:
        return {"error": f"Failed to fetch models: {e}"}, gr.update(choices=[default_model], value=default_model)

def change_model(selected_model):
    global model
    model = pl.Text(
        model=selected_model,
        system="You are a helpful AI assistant.",
        contextual=True,
        seed="random",
        reasoning_effort="medium"
    )
    return f"Switched to model: {selected_model}"

def chatbot_response(user_message, history, selected_model):
    global conversation_history, model
    # Ensure model is up-to-date
    if model.model != selected_model:
        model = pl.Text(
            model=selected_model,
            system="You are a helpful AI assistant.",
            contextual=True,
            seed="random",
            reasoning_effort="medium"
        )
    # Generate response with streaming
    seed = int(datetime.now().timestamp())
    response = ""
    for token in model(user_message, stream=True, seed=seed):
        response += token
    # Append to history with timestamp and model info
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    conversation_history.append({
        "timestamp": timestamp,
        "user_message": user_message,
        "bot_response": response,
        "model": selected_model
    })
    # Fill in the pending Gradio history entry instead of appending a duplicate turn
    history[-1][1] = response
    # Save to Parquet and push to HF
    save_conversation()
    return history

def save_conversation():
    if not conversation_history:
        return
    # Convert to DataFrame
    df = pd.DataFrame(conversation_history)
    # Generate filename with timestamp
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = f"conversation_{timestamp}.parquet"
    # Save to Parquet
    table = pa.Table.from_pandas(df)
    pq.write_table(table, filename)
    # Push to Hugging Face
    try:
        api.upload_file(
            path_or_fileobj=filename,
            path_in_repo=f"data/{filename}",
            repo_id=REPO_ID,
            repo_type="dataset",
            token=HF_TOKEN
        )
        os.remove(filename)  # Clean up local file
    except Exception as e:
        print(f"Error uploading to Hugging Face: {e}")

# Create Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Pollinations AI Chatbot")
    with gr.Row():
        with gr.Column(scale=3):
            chatbot = gr.Chatbot()
            msg = gr.Textbox(placeholder="Type your message...")
            model_selector = gr.Dropdown(label="Select Model", choices=[default_model])
            change_model_btn = gr.Button("Change Model")
            model_status = gr.Markdown()  # Confirmation message after switching models
            clear = gr.Button("Clear")
        with gr.Column(scale=1):
            models_output = gr.JSON(label="Pollinations Models")

    # Fetch models on load and populate the dropdown
    demo.load(fetch_models, None, [models_output, model_selector])

    def user_input(user_message, history):
        return "", history + [[user_message, None]]

    def bot_response(history, selected_model):
        if not history or not history[-1][0]:
            return history
        user_message = history[-1][0]
        history = chatbot_response(user_message, history, selected_model)
        return history

    # Event handlers
    msg.submit(user_input, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot_response, [chatbot, model_selector], chatbot
    )
    change_model_btn.click(change_model, model_selector, model_status)
    clear.click(lambda: ([], ""), None, [chatbot, msg], queue=False)

# Launch the demo
demo.queue().launch()
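
# A minimal requirements.txt for this Space might look like the following
# (package names are assumptions; pin versions as needed):
#
#   gradio
#   pollinations
#   pandas
#   pyarrow
#   huggingface_hub
#   requests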