Upload 12 files

Files changed:
- app.py +3 -7
- session_state.py +43 -2
app.py CHANGED
@@ -71,12 +71,8 @@ def process_message(prompt, is_example=False):
         typing_indicator = st.empty()
         typing_indicator.markdown("*Generando respuesta...*")
 
-        #
-        response = state.chat.send_message(
-            enhanced_prompt,
-            stream=True,
-            generation_config={"temperature": 0.9}
-        )
+        # Use the new send_message method
+        response = state.send_message(enhanced_prompt)
 
         full_response = ''
 
@@ -107,7 +103,7 @@ def process_message(prompt, is_example=False):
             avatar=AI_AVATAR_ICON
         )
 
-        # Save the updated history
+        # Save the updated history
         state.save_chat_history()
 
     except Exception as e:
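The call site still treats response as a stream: the full_response = '' right below it is filled chunk by chunk further down in the function. For reference, the streaming pattern in the google-generativeai SDK looks roughly like this; a minimal self-contained sketch, where the model name, the prompt, and the accumulation loop are illustrative assumptions rather than code taken from this repo:

    import google.generativeai as genai

    genai.configure(api_key="YOUR_API_KEY")  # placeholder; supply a real key
    model = genai.GenerativeModel("gemini-2.0-flash")
    chat = model.start_chat(history=[])

    # stream=True returns an iterator of partial responses
    # instead of a single final object
    response = chat.send_message("Hola", stream=True)

    full_response = ""
    for chunk in response:
        full_response += chunk.text  # accumulate partial text as it arrives
    print(full_response)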
session_state.py CHANGED
@@ -110,10 +110,51 @@ class SessionState:
         self.model = genai.GenerativeModel(model_name)
 
     def initialize_chat(self, history=None):
-        """Initializes the chat with the model"""
+        """Initializes the chat with the model and ensures streaming persists"""
         if history is None:
             history = self.gemini_history
-
+
+        # Make sure the model is initialized
+        if self.model is None:
+            self.initialize_model()
+
+        # Configure the chat with streaming enabled
+        self.chat = self.model.start_chat(
+            history=history,
+            generation_config={
+                "temperature": 0.9,
+                "stream": True  # Always force streaming
+            }
+        )
+
+        # Verify that the chat was initialized correctly
+        if self.chat is None:
+            raise ValueError("Error al inicializar el chat")
+
+    def send_message(self, prompt, stream=True):
+        """Unified method for sending messages while keeping streaming"""
+        try:
+            if self.chat is None:
+                self.initialize_chat()
+
+            return self.chat.send_message(
+                prompt,
+                stream=stream,  # Honor the stream parameter
+                generation_config={
+                    "temperature": 0.9
+                }
+            )
+        except Exception as e:
+            print(f"Error al enviar mensaje: {e}")
+            # Retry once if there is an error
+            self.initialize_chat()
+            return self.chat.send_message(
+                prompt,
+                stream=stream,
+                generation_config={
+                    "temperature": 0.9
+                }
+            )
 
     def generate_chat_title(self, prompt, model_name='gemini-2.0-flash'):
         """Generates a title for the chat based on the first message"""
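One caveat worth flagging in the new initialize_chat: in the versions of the google-generativeai SDK I'm aware of, GenerativeModel.start_chat() accepts history (and enable_automatic_function_calling) but no generation_config argument, and "stream" is not a GenerationConfig field, so the call above would be expected to raise a TypeError at runtime. Streaming has to be requested per call, which send_message already does correctly. A minimal sketch of the same intent using only arguments the SDK accepts (the helper name is mine, not from this commit):

    import google.generativeai as genai

    def make_streaming_sender(model_name="gemini-2.0-flash", history=None):
        """Start a chat and return a sender that streams by default."""
        model = genai.GenerativeModel(model_name)
        chat = model.start_chat(history=history or [])  # start_chat takes history only

        def send(prompt, stream=True):
            # Per-request options belong on send_message, not on start_chat
            return chat.send_message(
                prompt,
                stream=stream,
                generation_config={"temperature": 0.9},
            )

        return send

The retry path in send_message follows the same shape as the happy path: reinitialize the chat once and resend, which also rebuilds the session from gemini_history since initialize_chat defaults to it.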