Upload app.py

app.py CHANGED
@@ -129,7 +129,7 @@ def process_model_response(prompt, max_retries=3):
     with st.chat_message(name=MODEL_ROLE, avatar=AI_AVATAR_ICON):
         # Add a "typing..." indicator
         typing_indicator = st.empty()
-        typing_indicator.markdown("
+        typing_indicator.markdown("*🤖 RoboCopy está escribiendo...*")
 
         response = st.session_state.chat.send_message(prompt, stream=True)
         mensaje_completo = ""
@@ -141,9 +141,9 @@ def process_model_response(prompt, max_retries=3):
             for caracter in chunk.text:
                 mensaje_actual += caracter
                 typing_indicator.markdown(mensaje_actual + "▌")
-                time.sleep(0.
+                time.sleep(0.02)  # Adjusted typing speed
 
-        # Show the final message
+        # Show the final message and clear the indicator
         typing_indicator.markdown(mensaje_completo)
 
         add_message(MODEL_ROLE, mensaje_completo, AI_AVATAR_ICON)
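These two hunks implement a typewriter effect: a single st.empty() placeholder is first filled with a typing notice, then overwritten character by character as streamed chunks arrive. A minimal, self-contained sketch of that pattern (not part of the commit; fake_stream is a hypothetical stand-in for the Gemini streaming response):

import time
import streamlit as st

def fake_stream():
    # Hypothetical stand-in for st.session_state.chat.send_message(prompt, stream=True)
    yield from ["Hola, ", "soy ", "RoboCopy."]

placeholder = st.empty()                      # one slot, overwritten in place
placeholder.markdown("*🤖 escribiendo...*")   # initial typing indicator

texto = ""
for chunk in fake_stream():
    for caracter in chunk:
        texto += caracter
        placeholder.markdown(texto + "▌")     # block cursor while streaming
        time.sleep(0.02)                      # same delay the commit settles on

placeholder.markdown(texto)                   # final render, cursor removed

Overwriting one placeholder avoids stacking a new element per chunk, which is why the final markdown call both shows the full message and clears the cursor.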
@@ -259,6 +259,20 @@ with st.sidebar:
 
     st.write('# Chats Anteriores')
 
+    # Add a New Chat button
+    if st.button('📝 Nuevo Chat'):
+        st.session_state.update({
+            'current_chat_id': str(time.time()),
+            'messages': [],
+            'gemini_history': [],
+            'chat_title': 'Nuevo Chat',
+            'show_examples': True
+        })
+        st.session_state.chat = st.session_state.model.start_chat(history=[])
+        st.session_state.chat.send_message(SYSTEM_PROMPT)
+        st.session_state.gemini_history = st.session_state.chat.history
+        st.rerun()
+
     chat_options = [new_chat_id] + list(st.session_state.chats_in_memory.keys())
     current_index = chat_options.index(st.session_state.current_chat_id) if st.session_state.current_chat_id in chat_options else 0
 
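The button handler repeats the same four-step reset (start_chat, re-send SYSTEM_PROMPT, sync gemini_history, rerun) that also appears in the chat-switch handler below and again at the end of the file. A hypothetical reset_chat helper (not in the commit), assuming the app's existing st, SYSTEM_PROMPT, and session-state layout, would keep the call sites in lockstep:

def reset_chat(history=None):
    # Hypothetical helper: one place for the repeated reset sequence.
    st.session_state.chat = st.session_state.model.start_chat(history=history or [])
    st.session_state.chat.send_message(SYSTEM_PROMPT)
    st.session_state.gemini_history = st.session_state.chat.history
    st.rerun()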
@@ -283,7 +297,7 @@ with st.sidebar:
         })
 
         # Restart the chat with the model
-        st.session_state.chat = st.session_state.model.start_chat(history=
+        st.session_state.chat = st.session_state.model.start_chat(history=st.session_state.gemini_history)
         st.session_state.chat.send_message(SYSTEM_PROMPT)
         st.session_state.gemini_history = st.session_state.chat.history
         st.rerun()
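This hunk is the functional fix of the commit: restarting with history=st.session_state.gemini_history resumes the stored conversation instead of wiping it. The underlying google-generativeai pattern, sketched standalone (the model name and key handling are assumptions, not taken from app.py):

import google.generativeai as genai

genai.configure(api_key="...")                # key elided
model = genai.GenerativeModel("gemini-pro")   # assumed model name

chat = model.start_chat(history=[])
chat.send_message("Hola")
saved = chat.history                          # list of content turns, safe to stash in session state

# Later: resume where the user left off instead of starting a blank chat.
chat = model.start_chat(history=saved)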
@@ -367,6 +381,29 @@ with chat_container:
         process_model_response(prompt)
         update_chat_memory()
 
+    # Process new input if present
+    if new_prompt:
+        with st.chat_message("user", avatar=USER_AVATAR_ICON):
+            st.markdown(new_prompt)
+        add_message("user", new_prompt, USER_AVATAR_ICON)
+
+        try:
+            title_response = st.session_state.model.generate_content(
+                f"Título para consulta: '{new_prompt}' (máximo 4 palabras)"
+            )
+            st.session_state.chat_title = title_response.text.strip()[:25]
+        except Exception as e:
+            st.session_state.chat_title = f"Chat-{time.strftime('%H:%M')}"
+
+        st.session_state.chats_in_memory[st.session_state.current_chat_id] = {
+            'messages': st.session_state.messages,
+            'gemini_history': st.session_state.gemini_history,
+            'title': st.session_state.chat_title
+        }
+
+        process_model_response(new_prompt)
+        update_chat_memory()
+
     # Process new input if present
     if new_prompt:
         with st.chat_message("user", avatar=USER_AVATAR_ICON):
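The inserted block auto-titles the chat with a one-shot generate_content call, falling back to a timestamp label when the call fails. The same pattern as a standalone helper (hypothetical name, same SDK assumption as above):

import time

def title_for(prompt, model):
    # One-shot title generation with a timestamp fallback, mirroring the diff.
    try:
        response = model.generate_content(
            f"Título para consulta: '{prompt}' (máximo 4 palabras)"
        )
        return response.text.strip()[:25]     # sidebar labels are capped at 25 chars
    except Exception:
        return f"Chat-{time.strftime('%H:%M')}"

Note that, as rendered, the new if new_prompt: block sits immediately above a nearly identical pre-existing handler (context lines 407-409), so a prompt appears to be handled twice per run; worth verifying against the full file.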
@@ -401,4 +438,4 @@ with chat_container:
         st.session_state.chat = st.session_state.model.start_chat(history=[])
         st.session_state.chat.send_message(SYSTEM_PROMPT)
         st.session_state.gemini_history = st.session_state.chat.history
-        st.rerun()
+        st.rerun()