Spaces: Upload app.py

app.py CHANGED
@@ -133,14 +133,17 @@ def process_model_response(prompt, max_retries=3):
 
         response = st.session_state.chat.send_message(prompt, stream=True)
         mensaje_completo = ""
+        mensaje_actual = ""
 
-        # Process the response in chunks
+        # Process the response in chunks with a typing effect
         for chunk in response:
             mensaje_completo += chunk.text
-            …
-            …
+            for caracter in chunk.text:
+                mensaje_actual += caracter
+                typing_indicator.markdown(mensaje_actual + "▌")
+                time.sleep(0.01)  # Adjustable typing speed
 
-        # Show the final message
+        # Show the final message
         typing_indicator.markdown(mensaje_completo)
 
         add_message(MODEL_ROLE, mensaje_completo, AI_AVATAR_ICON)
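The hunk above streams the model reply character by character into a single placeholder. For reference, the effect can be reproduced in isolation; a minimal sketch, assuming `typing_indicator` is an `st.empty()` placeholder as app.py appears to create earlier (not shown in this diff):

    import time
    import streamlit as st

    st.title("Typing-effect demo")

    # Placeholder overwritten on each iteration; in app.py this role is
    # played by typing_indicator (assumed to be st.empty()).
    typing_indicator = st.empty()

    respuesta = "Hola, esta es una respuesta simulada del modelo."
    mensaje_actual = ""

    for caracter in respuesta:
        mensaje_actual += caracter
        typing_indicator.markdown(mensaje_actual + "▌")  # trailing cursor
        time.sleep(0.01)  # same adjustable typing speed as the commit

    typing_indicator.markdown(mensaje_actual)  # final render without cursor

Overwriting one `st.empty()` placeholder, rather than emitting a new markdown element per character, is what keeps the output from stacking up on the page.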
@@ -270,10 +273,20 @@ with st.sidebar:
     if st.button('🗑️ Borrar Historial de Chat Actual'):
         if st.session_state.current_chat_id in st.session_state.chats_in_memory:
             del st.session_state.chats_in_memory[st.session_state.current_chat_id]
-            …
-            …
-            …
-            …
+
+        # Fully reset the current chat state
+        st.session_state.update({
+            'messages': [],
+            'gemini_history': [],
+            'chat_title': 'Nuevo Chat',
+            'show_examples': True
+        })
+
+        # Restart the chat with the model
+        st.session_state.chat = st.session_state.model.start_chat(history=[])
+        st.session_state.chat.send_message(SYSTEM_PROMPT)
+        st.session_state.gemini_history = st.session_state.chat.history
+        st.rerun()
 
 # === MODEL CONFIGURATION ===
 model = genai.GenerativeModel(model_name='gemini-2.0-flash')
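The deletion handler uses a pattern that generalizes beyond this app: batch-update `st.session_state`, then call `st.rerun()` so the UI reflects the cleared state immediately instead of waiting for the next interaction. A minimal sketch of that pattern on its own; the counter app is illustrative, not from app.py:

    import streamlit as st

    # Initialize once per session.
    if "clicks" not in st.session_state:
        st.session_state.clicks = 0

    if st.button("Increment"):
        st.session_state.clicks += 1

    if st.button("Reset"):
        # Same shape as the commit: batch-update state, then force an
        # immediate re-render so the stale value never shows on screen.
        st.session_state.update({"clicks": 0})
        st.rerun()

    st.write(f"Clicks: {st.session_state.clicks}")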
@@ -375,4 +388,17 @@ with chat_container:
     }
 
     process_model_response(new_prompt)
-    update_chat_memory()
+    update_chat_memory()
+    # In the SIDEBAR section
+    if st.button('📝 Nuevo Chat'):
+        st.session_state.update({
+            'current_chat_id': str(time.time()),
+            'messages': [],
+            'gemini_history': [],
+            'chat_title': 'Nuevo Chat',
+            'show_examples': True
+        })
+        st.session_state.chat = st.session_state.model.start_chat(history=[])
+        st.session_state.chat.send_message(SYSTEM_PROMPT)
+        st.session_state.gemini_history = st.session_state.chat.history
+        st.rerun()
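Note that as committed, the '📝 Nuevo Chat' button is appended after the chat-container logic even though its comment says it belongs in the sidebar; moving it under the existing `with st.sidebar:` block would match the comment. Both new branches, clearing the current history and starting a fresh chat, also end with the same four steps: re-create the chat session, replay the system prompt, store the history, and rerun. A hedged sketch of a helper that would factor this out; `reset_gemini_chat` is a hypothetical name, and the `SYSTEM_PROMPT` value is a placeholder for whatever app.py defines:

    import streamlit as st

    SYSTEM_PROMPT = "..."  # placeholder; app.py defines its own prompt

    def reset_gemini_chat():
        # Hypothetical helper (not in the commit). Assumes
        # st.session_state.model was set by the model-configuration block.
        st.session_state.chat = st.session_state.model.start_chat(history=[])
        st.session_state.chat.send_message(SYSTEM_PROMPT)
        st.session_state.gemini_history = st.session_state.chat.history
        st.rerun()

    # Usage: call reset_gemini_chat() from both button handlers after
    # updating st.session_state, instead of repeating the four lines.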