Upload app.py
app.py
CHANGED
@@ -4,399 +4,198 @@ import joblib
 import streamlit as st
 import google.generativeai as genai
 from dotenv import load_dotenv
-from puv_formulas import puv_formulas
 
-#
-CONFIG = {
-    'model': {
-        'role': 'ai',
-        'avatar': '🤖'
-    },
-    'user': {
-        'avatar': '👤'
-    },
-    'page': {
-        'title': "RoboCopy - Creador de PUVs",
-        'icon': "🚀",
-        'layout': "wide"
-    }
-}
-
-# Configuración de página
-st.set_page_config(
-    page_title=CONFIG['page']['title'],
-    page_icon=CONFIG['page']['icon'],
-    layout=CONFIG['page']['layout']
-)
-
-# Configuración de avatares e identificadores
-MODEL_ROLE = 'ai'
-AI_AVATAR_ICON = '🤖'
-USER_AVATAR_ICON = '👤'
-
-# === FUNCIONES AUXILIARES ===
 def load_css(file_path):
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    return "Nuevo Chat"
-    first_msg = messages[0]['content'] if messages else ""
-    title = first_msg[:30] + "..." if len(first_msg) > 30 else first_msg
-    return title
-
-def get_formulas_for_prompt():
-    prompt_text = "\nFÓRMULAS DE PROPUESTAS ÚNICAS DE VALOR (PUVs):\n\n"
-
-    for key, formula in puv_formulas.items():
-        prompt_text += f"🔹 {key}:\n"
-        prompt_text += f" - Descripción: {formula.get('description', 'Descripción no disponible').strip()}\n"
-
-        if 'Structure:' in formula.get('description', ''):
-            estructura = formula['description'].split('Structure:')[1].split('Key elements:')[0].strip()
-            prompt_text += " - Estructura Base:\n"
-            for line in estructura.split('\n'):
-                if line.strip():
-                    prompt_text += f" * {line.strip()}\n"
-
-        ejemplos = formula.get('examples', [])[:2]
-        if ejemplos:
-            prompt_text += " - Ejemplos destacados:\n"
-            for i, ejemplo in enumerate(ejemplos):
-                prompt_text += f" {i+1}. Público objetivo: {ejemplo.get('target_audience', 'No especificado')}\n"
-                prompt_text += f" Servicio: {ejemplo.get('product_service', 'No especificado')}\n"
-                prompt_text += f" PUV: {ejemplo.get('uvp', 'Ejemplo no disponible')}\n"
-
-        prompt_text += "\n" + "-"*50 + "\n"
-
-    return prompt_text
-
-def add_message(role, content, avatar):
-    message = {
-        'role': role,
-        'content': content,
-        'avatar': avatar
     }
-
-
-
-def update_chat_memory():
-    st.session_state.chats_in_memory[st.session_state.current_chat_id].update({
-        'messages': st.session_state.messages,
-        'gemini_history': st.session_state.gemini_history,
-        'title': st.session_state.chat_title
-    })
 
-def handle_model_error(error, retry_count, max_retries):
-    if retry_count >= max_retries:
-        error_message = f"Lo siento, estoy experimentando problemas para procesar tu solicitud. Por favor, intenta de nuevo más tarde. Error: {str(error)}"
-        return error_message
-    return None
-
-def process_model_response(prompt, max_retries=3):
-    retry_count = 0
-    while retry_count < max_retries:
-        try:
-            # Crear el mensaje del modelo primero
-            with st.chat_message(name=MODEL_ROLE, avatar=AI_AVATAR_ICON):
-                # Añade un indicador de "escribiendo..."
-                typing_indicator = st.empty()
-                typing_indicator.markdown("*🤖 RoboCopy está escribiendo...*")
-
-                response = st.session_state.chat.send_message(prompt, stream=True)
-                mensaje_completo = ""
-                mensaje_actual = ""
-
-                # Procesar la respuesta por chunks con efecto de escritura
-                for chunk in response:
-                    mensaje_completo += chunk.text
-                    for caracter in chunk.text:
-                        mensaje_actual += caracter
-                        typing_indicator.markdown(mensaje_actual + "▌")
-                        time.sleep(0.02)  # Velocidad de escritura ajustada
-
-                # Mostrar mensaje final y limpiar indicador
-                typing_indicator.markdown(mensaje_completo)
-
-            add_message(MODEL_ROLE, mensaje_completo, AI_AVATAR_ICON)
-            st.session_state.gemini_history = st.session_state.chat.history
-            update_chat_memory()
-            return True
-
-        except Exception as e:
-            retry_count += 1
-            error_msg = handle_model_error(e, retry_count, max_retries)
-            if error_msg:
-                with st.chat_message(name=MODEL_ROLE, avatar=AI_AVATAR_ICON):
-                    st.error(error_msg)
-                add_message(MODEL_ROLE, error_msg, AI_AVATAR_ICON)
-                update_chat_memory()
-                return False
-            wait_time = (1.5 ** retry_count)
-            time.sleep(wait_time)
-    return False
-
-def mostrar_con_efecto_escritura(texto, velocidad=0.05):
-    placeholder = st.empty()
-    contenido_actual = ""
-
-    for caracter in texto:
-        contenido_actual += caracter
-        placeholder.markdown(contenido_actual + "▌")
-        time.sleep(velocidad)
-
-    placeholder.markdown(contenido_actual)
-    return contenido_actual
-
-def process_model_response(prompt, max_retries=3):
-    retry_count = 0
-    while retry_count < max_retries:
-        try:
-            # Crear el mensaje del modelo primero
-            with st.chat_message(name=MODEL_ROLE, avatar=AI_AVATAR_ICON):
-                # Añade un indicador de "escribiendo..."
-                typing_indicator = st.empty()
-                typing_indicator.markdown("*🤖 RoboCopy está escribiendo...*")
-
-                response = st.session_state.chat.send_message(prompt, stream=True)
-                mensaje_completo = ""
-                mensaje_actual = ""
-
-                # Procesar la respuesta por chunks con efecto de escritura
-                for chunk in response:
-                    mensaje_completo += chunk.text
-                    for caracter in chunk.text:
-                        mensaje_actual += caracter
-                        typing_indicator.markdown(mensaje_actual + "▌")
-                        time.sleep(0.02)  # Velocidad de escritura ajustada
-
-                # Mostrar mensaje final y limpiar indicador
-                typing_indicator.markdown(mensaje_completo)
-
-            add_message(MODEL_ROLE, mensaje_completo, AI_AVATAR_ICON)
-            st.session_state.gemini_history = st.session_state.chat.history
-            update_chat_memory()
-            return True
-
-        except Exception as e:
-            retry_count += 1
-            if handle_model_error(e, retry_count, max_retries):
-                return False
-            wait_time = (1.5 ** retry_count)
-            time.sleep(wait_time)
-    return False
-
-def handle_example_click(prompt_text):
-    """Función para manejar clicks en ejemplos"""
-    st.session_state.update({
-        'show_examples': False,
-        'messages': [],
-        'current_chat_id': str(time.time()),
-        'gemini_history': [],
-        'chat_title': 'Nuevo Chat',
-        'user_input': prompt_text
-    })
-
-    # Inicializar nuevo chat si es necesario
-    if st.session_state.current_chat_id not in st.session_state.chats_in_memory:
-        st.session_state.chats_in_memory[st.session_state.current_chat_id] = {
-            'messages': [],
-            'gemini_history': [],
-            'title': 'Nuevo Chat'
-        }
-
-    st.rerun()
-
-# === CONFIGURACIÓN INICIAL ===
 load_dotenv()
-GOOGLE_API_KEY
 genai.configure(api_key=GOOGLE_API_KEY)
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-{get_formulas_for_prompt()}
-
-INSTRUCCIONES PARA CREAR PUVs:
-1. Si el usuario no ha proporcionado información sobre su producto/servicio y audiencia objetivo, solicítala de manera amable y directa.
-2. Si el usuario ha proporcionado información pero no ha elegido fórmula específica, pregúntale qué fórmula le gustaría utilizar.
-3. Una vez con toda la información, crear propuestas de valor utilizando ÚNICAMENTE la fórmula elegida.
-"""
-
-WELCOME_MESSAGE = """
-¡Hola! 👋 Soy RoboCopy, tu asistente especializado en crear Propuestas Únicas de Valor impactantes.
-¿En qué puedo ayudarte hoy?
-"""
-
-# === INICIALIZACIÓN DEL ESTADO ===
-new_chat_id = str(time.time())
-
-if 'chats_in_memory' not in st.session_state:
-    st.session_state.update({
-        'chats_in_memory': {},
-        'current_chat_id': new_chat_id,
-        'chat_title': 'Nuevo Chat',
-        'messages': [],
-        'show_examples': True,
-        'gemini_history': []
-    })
-
-# === SIDEBAR ===
 with st.sidebar:
     col1, col2, col3 = st.columns([1, 2, 1])
     with col2:
         st.image("assets/robocopy_logo.png", width=300)
 
     st.write('# Chats Anteriores')
-
-
-
-
-
-    '
-
-
-
-
-
-
-
-
-
-
-
-    #
-
-
-    st.
-
-
-
-    st.session_state.
-
-
-
-
-
-
-
-
 
-
-
-
-
-
 
-
-
-
-
 
-
-
-
-    with col2:
-        if st.button("Ayúdame a mejorar mi PUV actual ✨", use_container_width=True):
-            handle_example_click("¿Podrías ayudarme a mejorar mi PUV actual para hacerla más persuasiva?")
 
-
-
-
-
-
-
-for message in st.session_state.messages:
-    with st.chat_message(
-        name=message['role'],
-        avatar=AI_AVATAR_ICON if message['role'] == MODEL_ROLE else USER_AVATAR_ICON
-    ):
-        st.markdown(message['content'])
-
-# Procesar entrada del usuario si existe
-if 'user_input' in st.session_state:
-    prompt = st.session_state.user_input
-    del st.session_state.user_input
-
-    with st.chat_message("user", avatar=USER_AVATAR_ICON):
-        st.markdown(prompt)
-    add_message("user", prompt, USER_AVATAR_ICON)
-
-    try:
-        title_response = st.session_state.model.generate_content(
-            f"Título para consulta: '{prompt}' (máximo 4 palabras)"
-        )
-        st.session_state.chat_title = title_response.text.strip()[:25]
     except Exception as e:
-
-
-
-
-
-            'title': st.session_state.chat_title
-        }
-
-    # Añadir el indicador de escritura
-    with st.chat_message(name=MODEL_ROLE, avatar=AI_AVATAR_ICON):
-        typing_indicator = st.empty()
-        typing_indicator.markdown("*🤖 RoboCopy está escribiendo...*")
-        process_model_response(prompt)
-        update_chat_memory()
 
-
-
-
-
-
-
-
-
-
-
-
-
 
-
-
-
-        'title': st.session_state.chat_title
-    }
 
-#
-
-
-
-
-
 import streamlit as st
 import google.generativeai as genai
 from dotenv import load_dotenv
 
+# Función para cargar CSS personalizado
 def load_css(file_path):
+    with open(file_path) as f:
+        st.markdown(f'<style>{f.read()}</style>', unsafe_allow_html=True)
+
+# Intentar cargar el CSS personalizado con ruta absoluta para mayor seguridad
+try:
+    css_path = os.path.join(os.path.dirname(__file__), 'static', 'css', 'style.css')
+    load_css(css_path)
+except Exception as e:
+    print(f"Error al cargar CSS: {e}")
+    # Si el archivo no existe, crear un estilo básico en línea
+    st.markdown("""
+    <style>
+    .robocopy-title {
+        color: #4ECDC4 !important;
+        font-weight: bold;
+        font-size: 2em;
     }
+    </style>
+    """, unsafe_allow_html=True)
 
 load_dotenv()
+GOOGLE_API_KEY=os.environ.get('GOOGLE_API_KEY')
 genai.configure(api_key=GOOGLE_API_KEY)
 
+new_chat_id = f'{time.time()}'
+MODEL_ROLE = 'ai'
+AI_AVATAR_ICON = '🤖'  # Cambia el emoji por uno de robot para coincidir con tu logo
+USER_AVATAR_ICON = '👤'  # Añade un avatar para el usuario
+
+# Create a data/ folder if it doesn't already exist
+try:
+    os.mkdir('data/')
+except:
+    # data/ folder already exists
+    pass
+
+# Load past chats (if available)
+try:
+    past_chats: dict = joblib.load('data/past_chats_list')
+except:
+    past_chats = {}
+
+# Sidebar allows a list of past chats
 with st.sidebar:
+    # Centrar el logo y eliminar el título de RoboCopy
     col1, col2, col3 = st.columns([1, 2, 1])
     with col2:
         st.image("assets/robocopy_logo.png", width=300)
 
     st.write('# Chats Anteriores')
+    if st.session_state.get('chat_id') is None:
+        st.session_state.chat_id = st.selectbox(
+            label='Selecciona un chat anterior',
+            options=[new_chat_id] + list(past_chats.keys()),
+            format_func=lambda x: past_chats.get(x, 'Nuevo Chat'),
+            placeholder='_',
+        )
+    else:
+        # This will happen the first time AI response comes in
+        st.session_state.chat_id = st.selectbox(
+            label='Selecciona un chat anterior',
+            options=[new_chat_id, st.session_state.chat_id] + list(past_chats.keys()),
+            index=1,
+            format_func=lambda x: past_chats.get(x, 'Nuevo Chat' if x != st.session_state.chat_id else st.session_state.chat_title),
+            placeholder='_',
+        )
+    # Save new chats after a message has been sent to AI
+    # TODO: Give user a chance to name chat
+    st.session_state.chat_title = f'SesiónChat-{st.session_state.chat_id}'
+
+st.write('# Chatea con Gemini')
+
+# Chat history (allows to ask multiple questions)
+try:
+    st.session_state.messages = joblib.load(
+        f'data/{st.session_state.chat_id}-st_messages'
+    )
+    st.session_state.gemini_history = joblib.load(
+        f'data/{st.session_state.chat_id}-gemini_messages'
+    )
+    print('old cache')
+except:
+    st.session_state.messages = []
+    st.session_state.gemini_history = []
+    print('new_cache made')
+st.session_state.model = genai.GenerativeModel('gemini-2.0-flash')
+st.session_state.chat = st.session_state.model.start_chat(
+    history=st.session_state.gemini_history,
+)
 
+# Display chat messages from history on app rerun
+for message in st.session_state.messages:
+    with st.chat_message(
+        name=message['role'],
+        avatar=message.get('avatar'),
+    ):
+        st.markdown(message['content'])
+
+# React to user input
+if prompt := st.chat_input('¿En qué puedo ayudarte hoy?'):  # Mensaje más amigable
+    # Save this as a chat for later
+    if st.session_state.chat_id not in past_chats.keys():
+        # Es una nueva conversación, generemos un título basado en el primer mensaje
+        # Primero, guardamos un título temporal
+        temp_title = f'SesiónChat-{st.session_state.chat_id}'
+        past_chats[st.session_state.chat_id] = temp_title
 
+        # Generamos un título basado en el contenido del mensaje
+        try:
+            # Usamos el mismo modelo para generar un título corto
+            title_generator = genai.GenerativeModel('gemini-2.0-flash')
+            title_response = title_generator.generate_content(
+                f"Genera un título corto (máximo 5 palabras) que describa de qué trata esta consulta, sin usar comillas ni puntuación: '{prompt}'")
 
+            # Obtenemos el título generado
+            generated_title = title_response.text.strip()
 
+            # Actualizamos el título en past_chats
+            if generated_title:
+                st.session_state.chat_title = generated_title
+                past_chats[st.session_state.chat_id] = generated_title
+            else:
+                st.session_state.chat_title = temp_title
         except Exception as e:
+            print(f"Error al generar título: {e}")
+            st.session_state.chat_title = temp_title
+    else:
+        # Ya existe esta conversación, usamos el título guardado
+        st.session_state.chat_title = past_chats[st.session_state.chat_id]
 
+    joblib.dump(past_chats, 'data/past_chats_list')
+
+    # Display user message in chat message container
+    with st.chat_message('user', avatar=USER_AVATAR_ICON):  # Añade el avatar del usuario
+        st.markdown(prompt)
+    # Add user message to chat history
+    st.session_state.messages.append(
+        dict(
+            role='user',
+            content=prompt,
+        )
+    )
+    ## Send message to AI
+    response = st.session_state.chat.send_message(
+        prompt,
+        stream=True,
+    )
+    # Display assistant response in chat message container
+    with st.chat_message(
+        name=MODEL_ROLE,
+        avatar=AI_AVATAR_ICON,
+    ):
+        message_placeholder = st.empty()
+        full_response = ''
+        assistant_response = response
 
+        # Añade un indicador de "escribiendo..."
+        typing_indicator = st.empty()
+        typing_indicator.markdown("*RoboCopy está escribiendo...*")
 
+        # Streams in a chunk at a time
+        for chunk in response:
+            # Simulate stream of chunk
+            # TODO: Chunk missing `text` if API stops mid-stream ("safety"?)
+            for ch in chunk.text.split(' '):
+                full_response += ch + ' '
+                time.sleep(0.1)  # Aumentado de 0.05 a 0.1 segundos para una velocidad más lenta
+                # Rewrites with a cursor at end
+                message_placeholder.write(full_response + '▌')
+        # Elimina el indicador de escritura
+        typing_indicator.empty()
+        # Write full message with placeholder
+        message_placeholder.write(full_response)
+
+        # Add assistant response to chat history
+        st.session_state.messages.append(
+            dict(
+                role=MODEL_ROLE,
+                content=st.session_state.chat.history[-1].parts[0].text,
+                avatar=AI_AVATAR_ICON,
+            )
+        )
+        st.session_state.gemini_history = st.session_state.chat.history
+        # Save to file
+        joblib.dump(
+            st.session_state.messages,
+            f'data/{st.session_state.chat_id}-st_messages',
+        )
+        joblib.dump(
+            st.session_state.gemini_history,
+            f'data/{st.session_state.chat_id}-gemini_messages',
+        )
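
Note: the heart of this commit is swapping in-memory chat state for on-disk persistence with joblib. Below is a minimal standalone sketch of that load-or-init / dump round trip, runnable outside Streamlit; the file names mirror the diff, everything else is illustrative and not part of the commit.

import os
import time

import joblib

# Equivalent to the diff's try/except around os.mkdir('data/')
os.makedirs('data', exist_ok=True)

# Load the chat index if present, otherwise start fresh (the diff uses a
# bare except; FileNotFoundError is the case it is guarding against)
try:
    past_chats: dict = joblib.load('data/past_chats_list')
except FileNotFoundError:
    past_chats = {}

chat_id = f'{time.time()}'
past_chats[chat_id] = 'Nuevo Chat'
joblib.dump(past_chats, 'data/past_chats_list')

# Each chat's message list is stored under a file name keyed by chat_id
messages = [dict(role='user', content='Hola')]
joblib.dump(messages, f'data/{chat_id}-st_messages')
assert joblib.load(f'data/{chat_id}-st_messages') == messages

Because the sidebar selectbox builds its options from this persisted index, saved chats reappear in the UI after an app restart.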
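
The streaming display in the new code combines a placeholder, a typing indicator, and a word-by-word rewrite with a trailing cursor. A sketch of that pattern decoupled from the Gemini client follows; stream_with_cursor is a hypothetical helper introduced here for illustration, and any iterable of text chunks works in place of the streamed response.

import time

import streamlit as st

def stream_with_cursor(chunks, delay=0.1):
    # Hypothetical helper, not part of the commit: replays text chunks
    # word by word into a single placeholder, appending a cursor while typing
    placeholder = st.empty()
    full_response = ''
    for chunk_text in chunks:
        for word in chunk_text.split(' '):
            full_response += word + ' '
            time.sleep(delay)
            placeholder.write(full_response + '▌')  # partial text + cursor
    placeholder.write(full_response)  # final rewrite without the cursor
    return full_response

stream_with_cursor(['Hola, soy RoboCopy.', '¿En qué puedo ayudarte hoy?'])

In the diff the same loop iterates the streamed Gemini response directly; note its TODO about chunks missing `text` when the API stops a stream early.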