import gradio as gr
import os
import json
import requests
import google.generativeai as genai
from datetime import datetime
from openai import OpenAI

# Load API keys from environment variables
openai_api_key = os.getenv("OPENAI_API_KEY")
together_api_key = os.getenv("TOGETHER_API_KEY")
gemini_api_key = os.getenv("GEMINI_API_KEY")

# Configure API clients (openai>=1.0 uses a client object; the old
# module-level openai.ChatCompletion interface has been removed)
openai_client = OpenAI(api_key=openai_api_key)
genai.configure(api_key=gemini_api_key)
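
# These SDKs only fail at request time when a key is missing, so a quick
# startup check (optional, but it makes misconfiguration obvious) is useful:
for _name, _key in [("OPENAI_API_KEY", openai_api_key),
                    ("TOGETHER_API_KEY", together_api_key),
                    ("GEMINI_API_KEY", gemini_api_key)]:
    if not _key:
        print(f"Warning: {_name} is not set; calls to that provider will fail.")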

# Initialize conversation history
conversation_history = []
learning_data = {}

# Function to generate a response using OpenAI
def generate_openai_response(message, model="gpt-3.5-turbo"):
    conversation_history.append({"role": "user", "content": message})
    try:
        response = openai_client.chat.completions.create(
            model=model,
            messages=conversation_history
        )
        assistant_message = response.choices[0].message.content
        conversation_history.append({"role": "assistant", "content": assistant_message})
        # Save for learning
        save_for_learning(message, assistant_message, "openai", model)
        return assistant_message
    except Exception as e:
        return f"Error with OpenAI: {str(e)}"

# Function to generate a response using Together AI
def generate_together_response(message, model="meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8"):
    conversation_history.append({"role": "user", "content": message})
    try:
        headers = {
            "Authorization": f"Bearer {together_api_key}",
            "Content-Type": "application/json"
        }
        # Send the full history rather than only the latest message, so
        # Together models keep conversational context like the OpenAI path
        data = {
            "model": model,
            "messages": conversation_history
        }
        response = requests.post(
            "https://api.together.xyz/v1/chat/completions",
            headers=headers,
            json=data,
            timeout=60
        )
        if response.status_code == 200:
            result = response.json()
            assistant_message = result["choices"][0]["message"]["content"]
            conversation_history.append({"role": "assistant", "content": assistant_message})
            # Save for learning
            save_for_learning(message, assistant_message, "together", model)
            return assistant_message
        else:
            return f"Error with Together AI: {response.text}"
    except Exception as e:
        return f"Error with Together AI: {str(e)}"

# Function to generate a response using Google Gemini
def generate_gemini_response(message, model="gemini-1.0-pro"):
    conversation_history.append({"role": "user", "content": message})
    try:
        gemini_model = genai.GenerativeModel(model)
        # Note: generate_content() is stateless, so each Gemini call sees
        # only the latest message, not the accumulated history
        response = gemini_model.generate_content(message)
        assistant_message = response.text
        conversation_history.append({"role": "assistant", "content": assistant_message})
        # Save for learning
        save_for_learning(message, assistant_message, "gemini", model)
        return assistant_message
    except Exception as e:
        return f"Error with Google Gemini: {str(e)}"

# Function to save data for learning
def save_for_learning(user_message, assistant_message, provider, model):
    timestamp = datetime.now().isoformat()
    entry = {
        "timestamp": timestamp,
        "user_message": user_message,
        "assistant_message": assistant_message,
        "provider": provider,
        "model": model
    }
    # In a real system, this would be saved to a database
    # For this demo, we'll just keep it in memory
    if "conversations" not in learning_data:
        learning_data["conversations"] = []
    learning_data["conversations"].append(entry)
    # Trigger autopilot learning (in a real system, this would be a background process)
    autopilot_learning()
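
# One way to make the database step above concrete is to append each entry
# to a JSON-lines file. This is an illustrative sketch only: the filename is
# arbitrary, and on hosted Spaces the filesystem is ephemeral, so durable
# storage would need a real database or an external store.
def persist_entry(entry, path="learning_log.jsonl"):
    with open(path, "a") as f:
        f.write(json.dumps(entry) + "\n")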

# Function for autopilot learning
def autopilot_learning():
    # In a real system, this would:
    # 1. Analyze past conversations to identify knowledge gaps
    # 2. Research topics to fill those gaps
    # 3. Update the model's knowledge base
    # 4. Improve response quality over time
    # For this demo, we'll just log that learning occurred
    timestamp = datetime.now().isoformat()
    if "autopilot_events" not in learning_data:
        learning_data["autopilot_events"] = []
    learning_data["autopilot_events"].append({
        "timestamp": timestamp,
        "event": "Autopilot learning cycle completed",
        "conversations_analyzed": len(learning_data.get("conversations", []))
    })
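
# A deliberately naive sketch of step 1 above (an assumed heuristic, not part
# of the original demo): treat turns where the assistant returned an error as
# "knowledge gaps" worth re-researching later.
def find_knowledge_gaps():
    gaps = []
    for entry in learning_data.get("conversations", []):
        if entry["assistant_message"].startswith("Error with"):
            gaps.append(entry["user_message"])
    return gaps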

# Function to handle chat based on selected model
def chat(message, model_choice):
    if not message:
        return "Please enter a message."
    if model_choice == "OpenAI GPT-3.5":
        return generate_openai_response(message, "gpt-3.5-turbo")
    elif model_choice == "OpenAI GPT-4":
        return generate_openai_response(message, "gpt-4")
    elif model_choice == "Together AI Llama":
        return generate_together_response(message, "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8")
    elif model_choice == "Together AI Mistral":
        return generate_together_response(message, "mistralai/Mistral-7B-Instruct-v0.1")
    elif model_choice == "Google Gemini Pro":
        return generate_gemini_response(message, "gemini-1.0-pro")
    elif model_choice == "Google Gemini Flash":
        return generate_gemini_response(message, "gemini-2.0-flash")
    else:
        return "Please select a valid model."

# Create Gradio interface
with gr.Blocks(css="footer {visibility: hidden}") as demo:
    gr.Markdown("# ML Agent System with Autopilot Learning")
    gr.Markdown("This system supports multiple AI models and features continuous learning in autopilot mode.")
    with gr.Row():
        with gr.Column(scale=4):
            chatbot = gr.Chatbot(height=400)
            msg = gr.Textbox(label="Type your message here", placeholder="Ask me anything...")
            clear = gr.Button("Clear Conversation")
        with gr.Column(scale=1):
            model = gr.Radio(
                ["OpenAI GPT-3.5", "OpenAI GPT-4", "Together AI Llama", "Together AI Mistral", "Google Gemini Pro", "Google Gemini Flash"],
                label="Select AI Model",
                value="OpenAI GPT-3.5"
            )
            gr.Markdown("### System Features")
            gr.Markdown("- Multi-model support")
            gr.Markdown("- Continuous learning")
            gr.Markdown("- Autopilot research mode")
            gr.Markdown("- Knowledge retention")

    def respond(message, chat_history, model_choice):
        if not message:
            return "", chat_history
        bot_message = chat(message, model_choice)
        chat_history.append((message, bot_message))
        return "", chat_history

    def clear_conversation():
        # Reset the backend history too, not just the visible chat;
        # otherwise cleared turns would still be sent to the APIs
        conversation_history.clear()
        return None

    msg.submit(respond, [msg, chatbot, model], [msg, chatbot])
    clear.click(clear_conversation, None, chatbot, queue=False)

# Launch the app
if __name__ == "__main__":
    demo.launch(share=True)  # share=True only matters for local runs; Spaces ignores it