# RoboCopy — Streamlit chat application backed by Google Gemini.
# (Replaced pasted "Spaces: Running" hosting-UI residue, which is not valid Python.)
import os
import time

import joblib
import streamlit as st
import google.generativeai as genai
from dotenv import load_dotenv
# Helper to inject a custom stylesheet into the Streamlit page.
def load_css(file_path):
    """Read a CSS file and inject it into the page as a <style> block.

    Args:
        file_path: Path to the CSS file to load.

    Raises:
        OSError: If the file cannot be opened or read.
    """
    # Explicit encoding avoids UnicodeDecodeError on platforms whose default
    # codec is not UTF-8 (e.g. cp1252 on Windows).
    with open(file_path, encoding='utf-8') as f:
        st.markdown(f'<style>{f.read()}</style>', unsafe_allow_html=True)
# Attempt to load the custom stylesheet. The path is built relative to this
# file so the app works regardless of the current working directory.
try:
    css_path = os.path.join(os.path.dirname(__file__), 'static', 'css', 'style.css')
    load_css(css_path)
except Exception as e:
    print(f"Error al cargar CSS: {e}")
    # Stylesheet missing or unreadable: fall back to a minimal inline style
    # so the title still matches the brand colors.
    st.markdown("""
    <style>
    .robocopy-title {
        color: #4ECDC4 !important;
        font-weight: bold;
        font-size: 2em;
    }
    </style>
    """, unsafe_allow_html=True)
# Load environment variables (.env) and configure the Gemini client.
load_dotenv()
GOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY')
genai.configure(api_key=GOOGLE_API_KEY)

# A fresh chat gets a timestamp-based identifier.
new_chat_id = f'{time.time()}'
MODEL_ROLE = 'ai'
AI_AVATAR_ICON = '🤖'    # Robot emoji, matching the app logo
USER_AVATAR_ICON = '👤'  # Avatar shown next to user messages
# Ensure the data/ folder used for chat persistence exists. exist_ok replaces
# the previous bare try/except around os.mkdir: it tolerates an existing
# directory without also swallowing real errors (permissions, read-only fs).
os.makedirs('data', exist_ok=True)

# Load the index of past chats ({chat_id: title}). Start empty when the file
# is missing or unreadable. `except Exception` (not a bare except) so that
# SystemExit / KeyboardInterrupt still propagate.
try:
    past_chats: dict = joblib.load('data/past_chats_list')
except Exception:
    past_chats = {}
# Sidebar: centered logo plus a selector for past chats.
with st.sidebar:
    # Center the logo using a 3-column trick (no text title next to it).
    col1, col2, col3 = st.columns([1, 2, 1])
    with col2:
        st.image("assets/robocopy_logo.png", width=300)

    st.write('# Chats Anteriores')
    if st.session_state.get('chat_id') is None:
        # First render: offer a brand-new chat plus any stored ones.
        st.session_state.chat_id = st.selectbox(
            label='Selecciona un chat anterior',
            options=[new_chat_id] + list(past_chats),
            format_func=lambda x: past_chats.get(x, 'Nuevo Chat'),
            placeholder='_',
        )
    else:
        # After the first AI response the current chat id is already set;
        # keep it selected (index=1) while still listing the alternatives.
        st.session_state.chat_id = st.selectbox(
            label='Selecciona un chat anterior',
            options=[new_chat_id, st.session_state.chat_id] + list(past_chats),
            index=1,
            format_func=lambda x: past_chats.get(x, 'Nuevo Chat' if x != st.session_state.chat_id else st.session_state.chat_title),
            placeholder='_',
        )
    # Save new chats after a message has been sent to AI
    # TODO: Give user a chance to name chat
    # Bug fix: previously the title was unconditionally reset to the generic
    # 'SesiónChat-...' name on every rerun, clobbering the AI-generated title
    # stored in past_chats. Prefer the stored title when one exists.
    st.session_state.chat_title = past_chats.get(
        st.session_state.chat_id,
        f'SesiónChat-{st.session_state.chat_id}',
    )
st.write('# Chatea con Gemini')

# Restore this chat's history from disk: `messages` is the Streamlit-facing
# list of dicts, `gemini_history` the raw Gemini conversation objects.
# `except Exception` replaces the previous bare except so SystemExit /
# KeyboardInterrupt are not silently trapped.
try:
    st.session_state.messages = joblib.load(
        f'data/{st.session_state.chat_id}-st_messages'
    )
    st.session_state.gemini_history = joblib.load(
        f'data/{st.session_state.chat_id}-gemini_messages'
    )
    print('old cache')
except Exception:
    # No cache on disk (new chat) or unreadable cache: start fresh.
    st.session_state.messages = []
    st.session_state.gemini_history = []
    print('new_cache made')

# (Re)create the model and a chat session primed with the stored history.
st.session_state.model = genai.GenerativeModel('gemini-2.0-flash')
st.session_state.chat = st.session_state.model.start_chat(
    history=st.session_state.gemini_history,
)
# Replay the stored conversation on every Streamlit rerun.
for msg in st.session_state.messages:
    with st.chat_message(name=msg['role'], avatar=msg.get('avatar')):
        st.markdown(msg['content'])
# React to user input.
if prompt := st.chat_input('¿En qué puedo ayudarte hoy?'):
    # Register this chat in the index the first time a message is sent.
    if st.session_state.chat_id not in past_chats:
        # New conversation: store a provisional title, then try to replace
        # it with a short AI-generated one based on the first message.
        temp_title = f'SesiónChat-{st.session_state.chat_id}'
        past_chats[st.session_state.chat_id] = temp_title
        try:
            # Reuse the same model family to produce a short descriptive title.
            title_generator = genai.GenerativeModel('gemini-2.0-flash')
            title_response = title_generator.generate_content(
                f"Genera un título corto (máximo 5 palabras) que describa de qué trata esta consulta, sin usar comillas ni puntuación: '{prompt}'")
            generated_title = title_response.text.strip()
            if generated_title:
                st.session_state.chat_title = generated_title
                past_chats[st.session_state.chat_id] = generated_title
            else:
                st.session_state.chat_title = temp_title
        except Exception as e:
            # Title generation is best-effort; keep the provisional title.
            print(f"Error al generar título: {e}")
            st.session_state.chat_title = temp_title
    else:
        # Existing conversation: reuse the stored title.
        st.session_state.chat_title = past_chats[st.session_state.chat_id]
    joblib.dump(past_chats, 'data/past_chats_list')

    # Display the user message in a chat bubble.
    with st.chat_message('user', avatar=USER_AVATAR_ICON):
        st.markdown(prompt)
    # Bug fix: persist the avatar with the user message so the replay loop
    # (which reads message.get('avatar')) shows the same icon as the live
    # render above; previously replayed user messages lost their avatar.
    st.session_state.messages.append(
        dict(
            role='user',
            content=prompt,
            avatar=USER_AVATAR_ICON,
        )
    )

    ## Send message to AI as a streamed response.
    response = st.session_state.chat.send_message(
        prompt,
        stream=True,
    )
    # Display assistant response in chat message container.
    with st.chat_message(
        name=MODEL_ROLE,
        avatar=AI_AVATAR_ICON,
    ):
        message_placeholder = st.empty()
        full_response = ''
        # "typing..." indicator shown while the stream is rendered.
        typing_indicator = st.empty()
        typing_indicator.markdown("*RoboCopy está escribiendo...*")
        # Streams in a chunk at a time.
        for chunk in response:
            # A chunk can lack `text` when the API stops mid-stream (e.g.
            # safety filters) — resolves the long-standing TODO by skipping
            # such chunks instead of raising AttributeError.
            chunk_text = getattr(chunk, 'text', None)
            if not chunk_text:
                continue
            # Simulate typing by revealing the chunk word by word.
            for word in chunk_text.split(' '):
                full_response += word + ' '
                time.sleep(0.1)  # deliberately slow reveal for readability
                # Rewrite with a cursor at the end.
                message_placeholder.write(full_response + '▌')
        # Remove the typing indicator and write the final text sans cursor.
        typing_indicator.empty()
        message_placeholder.write(full_response)

    # Persist the assistant reply (taken from the authoritative Gemini
    # history) and the raw history itself.
    st.session_state.messages.append(
        dict(
            role=MODEL_ROLE,
            content=st.session_state.chat.history[-1].parts[0].text,
            avatar=AI_AVATAR_ICON,
        )
    )
    st.session_state.gemini_history = st.session_state.chat.history
    # Save to file.
    joblib.dump(
        st.session_state.messages,
        f'data/{st.session_state.chat_id}-st_messages',
    )
    joblib.dump(
        st.session_state.gemini_history,
        f'data/{st.session_state.chat_id}-gemini_messages',
    )