Upload 12 files
app.py
CHANGED
@@ -66,10 +66,11 @@ def process_message(prompt, is_example=False):
 
     with st.chat_message(MODEL_ROLE, avatar=AI_AVATAR_ICON):
         try:
-            #
+            # Always force streaming
            response = state.chat.send_message(
                enhanced_prompt,
-                stream=True,
+                stream=True,  # Make sure this is always True
+                generation_config={"temperature": 0.9}  # Keep generation settings consistent
            )
 
            message_placeholder = st.empty()
@@ -83,7 +84,7 @@ def process_message(prompt, is_example=False):
            for chunk in response:
                for ch in chunk.text:
                    full_response += ch
-                    time.sleep(0.01)
+                    time.sleep(0.01)  # Keep the typing delay consistent
                    message_placeholder.write(full_response + '▌')
 
            # Remove the typing indicator
@@ -237,7 +238,9 @@ state.load_chat_history()
 
 # Initialize the model
 state.initialize_model('gemini-2.0-flash')
-
+# At the top of the file, make sure the state is initialized correctly
+if 'chat' not in st.session_state:
+    state.initialize_chat()
 
 # Show messages from the chat history
 for message in state.messages:
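
Taken together, the hunks pin down a single streaming path (stream=True with a fixed generation_config) and guard the chat object so Streamlit reruns do not recreate it. Below is a minimal, self-contained sketch of that pattern; the real app.py wraps its state in helpers like state.chat and state.initialize_chat(), so the direct st.session_state handling here is an assumption for illustration, not the Space's actual code.

# sketch.py - minimal sketch of the streaming pattern the diff enforces.
# Assumption: the real app uses a `state` wrapper; here we use
# st.session_state directly so the sketch runs on its own.
import os
import time

import streamlit as st
import google.generativeai as genai

MODEL_ROLE = 'ai'
AI_AVATAR_ICON = '✨'

genai.configure(api_key=os.environ['GOOGLE_API_KEY'])

# Same guard as the new hunk: create the chat once, so Streamlit reruns
# (which re-execute the whole script) do not wipe the conversation.
if 'chat' not in st.session_state:
    model = genai.GenerativeModel('gemini-2.0-flash')
    st.session_state.chat = model.start_chat(history=[])

prompt = st.chat_input('Type your message')
if prompt:
    with st.chat_message(MODEL_ROLE, avatar=AI_AVATAR_ICON):
        try:
            # Always force streaming; the fixed generation_config keeps
            # sampling behavior consistent across calls.
            response = st.session_state.chat.send_message(
                prompt,
                stream=True,
                generation_config={"temperature": 0.9},
            )
            message_placeholder = st.empty()
            full_response = ''
            for chunk in response:
                for ch in chunk.text:
                    full_response += ch
                    time.sleep(0.01)  # typewriter pacing, one character at a time
                    message_placeholder.write(full_response + '▌')
            message_placeholder.write(full_response)  # drop the cursor indicator
        except Exception as e:
            st.error(f'Error: {e}')

Without the 'chat' not in st.session_state guard, every rerun would call start_chat again and drop the conversation history, which is exactly what the lines added around line 241 prevent.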