Spaces:
Runtime error
Runtime error
File size: 6,832 Bytes
c76bbac 348d961 b1cf5e1 348d961 b1cf5e1 b18a8a5 c0073a3 b1cf5e1 c0073a3 b1cf5e1 b18a8a5 b1cf5e1 c76bbac b1cf5e1 d8f563a 15d4fae d8f563a 6a4a20b b1cf5e1 c76bbac b1cf5e1 b18a8a5 b1cf5e1 5596668 b1cf5e1 299bb98 b1cf5e1 58645bb b1cf5e1 a6d440d b1cf5e1 4c826ff b1cf5e1 4c826ff b1cf5e1 c2ec4b6 b1cf5e1 9b8d7fa b1cf5e1 b18a8a5 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 |
import time
import os
import joblib
import streamlit as st
import google.generativeai as genai
from dotenv import load_dotenv
# Helper to inject custom CSS into the Streamlit page.
def load_css(file_path):
    """Read a CSS file and inject its contents as an inline <style> tag.

    Args:
        file_path: Path to the .css file to load.

    Raises:
        OSError: If the file cannot be opened or read (the caller below
            wraps this call in a try/except and falls back to minimal CSS).
    """
    # Explicit encoding: CSS may contain non-ASCII characters and the
    # platform default encoding is not guaranteed to be UTF-8.
    with open(file_path, encoding='utf-8') as f:
        st.markdown(f'<style>{f.read()}</style>', unsafe_allow_html=True)
# Attempt to load the custom CSS; the path is built relative to this file
# so the app works regardless of the working directory it is launched from.
try:
    css_path = os.path.join(os.path.dirname(__file__), 'static', 'css', 'style.css')
    load_css(css_path)
except Exception as e:
    print(f"Error al cargar CSS: {e}")
    # Fall back to a minimal inline style so the title keeps its brand color.
    st.markdown('<style>.robocopy-title {color: #4ECDC4;}</style>', unsafe_allow_html=True)

# Load environment variables from a .env file, then configure the Gemini SDK.
# NOTE(review): if GOOGLE_API_KEY is unset, api_key is None and genai calls
# will fail later — consider failing fast here; TODO confirm desired behavior.
load_dotenv()
GOOGLE_API_KEY=os.environ.get('GOOGLE_API_KEY')
genai.configure(api_key=GOOGLE_API_KEY)

# Candidate id for a brand-new chat, derived from the current timestamp.
new_chat_id = f'{time.time()}'
MODEL_ROLE = 'ai'
AI_AVATAR_ICON = '🤖'  # robot emoji to match the app logo
USER_AVATAR_ICON = '👤'  # avatar shown next to user messages

# Ensure the data/ directory used for chat persistence exists.
os.makedirs('data/', exist_ok=True)
# Load the index of past chats ({chat_id: title}) if it was persisted by a
# previous session; otherwise start with an empty index.
try:
    past_chats: dict = joblib.load('data/past_chats_list')
except Exception:
    # First run (file missing) or an unreadable dump — start fresh.
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
    # are no longer swallowed.
    past_chats = {}
# Sidebar: lets the user pick one of the past chats or start a new one.
with st.sidebar:
    # Center the logo (middle column of a 1:2:1 layout) instead of showing
    # a text title for RoboCopy.
    col1, col2, col3 = st.columns([1, 2, 1])
    with col2:
        st.image("assets/robocopy_logo.png", width=300)

    st.write('# Chats Anteriores')
    if st.session_state.get('chat_id') is None:
        # No chat selected yet: offer the fresh chat id plus all saved chats.
        st.session_state.chat_id = st.selectbox(
            label='Selecciona un chat anterior',
            options=[new_chat_id] + list(past_chats.keys()),
            format_func=lambda x: past_chats.get(x, 'Nuevo Chat'),
            placeholder='_',
        )
    else:
        # This will happen the first time AI response comes in: keep the
        # current chat pre-selected (index=1) while still listing the rest.
        st.session_state.chat_id = st.selectbox(
            label='Selecciona un chat anterior',
            options=[new_chat_id, st.session_state.chat_id] + list(past_chats.keys()),
            index=1,
            format_func=lambda x: past_chats.get(x, 'Nuevo Chat' if x != st.session_state.chat_id else st.session_state.chat_title),
            placeholder='_',
        )
    # Save new chats after a message has been sent to AI.
    # TODO: Give user a chance to name chat
    st.session_state.chat_title = f'SesiónChat-{st.session_state.chat_id}'
st.write('# Chatea con Gemini')

# Restore this chat's history. Two parallel representations are persisted:
# `messages` (Streamlit display format) and `gemini_history` (API format).
try:
    st.session_state.messages = joblib.load(
        f'data/{st.session_state.chat_id}-st_messages'
    )
    st.session_state.gemini_history = joblib.load(
        f'data/{st.session_state.chat_id}-gemini_messages'
    )
    print('old cache')
except Exception:
    # New chat (no cache files yet) or unreadable dump — start fresh.
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
    # are no longer swallowed.
    st.session_state.messages = []
    st.session_state.gemini_history = []
    print('new_cache made')

# (Re)create the model and a chat session seeded with the restored history.
st.session_state.model = genai.GenerativeModel('gemini-2.0-flash')
st.session_state.chat = st.session_state.model.start_chat(
    history=st.session_state.gemini_history,
)
# Re-render the stored conversation on every Streamlit rerun.
for msg in st.session_state.messages:
    role, avatar = msg['role'], msg.get('avatar')
    with st.chat_message(name=role, avatar=avatar):
        st.markdown(msg['content'])
# React to user input — everything below runs only when a prompt is sent.
if prompt := st.chat_input('¿En qué puedo ayudarte hoy?'):
    # Register this chat in the index the first time a message is sent.
    if st.session_state.chat_id not in past_chats.keys():
        # Provisional title, kept if AI title generation fails below.
        temp_title = f'SesiónChat-{st.session_state.chat_id}'
        past_chats[st.session_state.chat_id] = temp_title
        try:
            # Ask the model for a short descriptive title based on the prompt.
            title_generator = genai.GenerativeModel('gemini-2.0-flash')
            title_response = title_generator.generate_content(
                f"Genera un título corto (máximo 5 palabras) que describa de qué trata esta consulta, sin usar comillas ni puntuación: '{prompt}'")
            generated_title = title_response.text.strip()
            if generated_title:
                st.session_state.chat_title = generated_title
                past_chats[st.session_state.chat_id] = generated_title
        except Exception as e:
            print(f"Error al generar título: {e}")
            st.session_state.chat_title = temp_title
    else:
        # Existing conversation: reuse the stored title.
        st.session_state.chat_title = past_chats[st.session_state.chat_id]

    # Persist the (possibly updated) chat index.
    joblib.dump(past_chats, 'data/past_chats_list')

    # Display the user message in a chat bubble.
    with st.chat_message('user', avatar=USER_AVATAR_ICON):
        st.markdown(prompt)
    # Append it to the display history.
    st.session_state.messages.append(
        dict(
            role='user',
            content=prompt,
        )
    )

    ## Send message to AI as a streamed response.
    response = st.session_state.chat.send_message(
        prompt,
        stream=True,
    )

    # Display assistant response in a chat bubble.
    with st.chat_message(
        name=MODEL_ROLE,
        avatar=AI_AVATAR_ICON,
    ):
        message_placeholder = st.empty()
        full_response = ''
        assistant_response = response
        # "typing..." indicator shown while the stream is being consumed.
        typing_indicator = st.empty()
        typing_indicator.markdown("*RoboCopy está escribiendo...*")
        # Stream in a chunk at a time, word by word, for a typing effect.
        for chunk in response:
            for word in chunk.text.split(' '):
                full_response += word + ' '
                time.sleep(0.1)  # pacing tuned for readability
                # Rewrite with a cursor at the end after each word.
                message_placeholder.write(full_response + '▌')
        # Remove the typing indicator.
        typing_indicator.empty()
        # Final write without the cursor.
        message_placeholder.write(full_response)

    # Append the assistant response to the display history; take the text
    # from the chat history so it matches what the API actually recorded.
    st.session_state.messages.append(
        dict(
            role=MODEL_ROLE,
            content=st.session_state.chat.history[-1].parts[0].text,
            avatar=AI_AVATAR_ICON,
        )
    )
    st.session_state.gemini_history = st.session_state.chat.history
    # Persist both history representations for this chat id.
    joblib.dump(
        st.session_state.messages,
        f'data/{st.session_state.chat_id}-st_messages',
    )
    joblib.dump(
        st.session_state.gemini_history,
        f'data/{st.session_state.chat_id}-gemini_messages',
    )
# Welcome/examples screen, shown only while the conversation is empty.
# Fixed: `st.session_state.show_examples` is never initialized anywhere in
# this file, so attribute access raised on first load; use .get() with a
# default of True so the welcome screen appears instead of crashing.
# NOTE(review): default True assumed to be the intent — confirm.
if st.session_state.get('show_examples', True) and not st.session_state.messages:
    st.title("💡 RoboCopy - Tu Experto en PUVs")
    st.markdown("### Creando Propuestas Únicas de Valor que Convierten Visitantes en Clientes")
    st.markdown("### 🎯 Prueba estos ejemplos:")