Upload 10 files
app.py
CHANGED
@@ -93,6 +93,34 @@ except:
     st.session_state.messages = []
     st.session_state.gemini_history = []
     print('new_cache made')
+
+# Initialize the model with the system prompt only if it does not exist yet or if we switched chats
+if not hasattr(st.session_state, 'puv_model') or st.session_state.get('current_chat_id') != st.session_state.chat_id:
+    system_prompt = get_puv_system_prompt()
+    st.session_state.puv_model = genai.GenerativeModel(
+        model_name='gemini-2.0-flash',
+        system_instruction=system_prompt
+    )
+    st.session_state.current_chat_id = st.session_state.chat_id
+
+# Initialize the chat with the existing history
+history_for_model = []
+for msg in st.session_state.gemini_history:
+    if isinstance(msg, dict) and 'role' in msg and 'parts' in msg:
+        # The format is already correct
+        history_for_model.append(msg)
+    elif isinstance(msg, dict) and 'role' in msg and 'content' in msg:
+        # Convert from the old format to the new one
+        history_for_model.append({
+            'role': msg['role'],
+            'parts': [{'text': msg['content']}]
+        })
+
+st.session_state.puv_chat = st.session_state.puv_model.start_chat(
+    history=history_for_model
+)
+
+# Standard model for other operations (such as generating titles)
 st.session_state.model = genai.GenerativeModel('gemini-2.0-flash')
 st.session_state.chat = st.session_state.model.start_chat(
     history=st.session_state.gemini_history,
@@ -152,17 +180,8 @@ if prompt := st.chat_input('¿En qué puedo ayudarte con tu Propuesta Única de
         )
     )
 
-    #
-
-
-    # Create a model instance with the system prompt
-    model = genai.GenerativeModel(
-        model_name='gemini-2.0-flash',
-        system_instruction=system_prompt
-    )
-
-    # Send the message to the AI with the system prompt
-    response_stream = model.generate_content(
+    # Use the chat with the system prompt to generate the response
+    response = st.session_state.puv_chat.send_message(
        prompt,
        stream=True,
    )
@@ -207,19 +226,8 @@ if prompt := st.chat_input('¿En qué puedo ayudarte con tu Propuesta Única de
         )
     )
 
-    # Update the Gemini history
-
-    st.session_state.gemini_history = []
-
-    st.session_state.gemini_history.append({
-        'role': 'user',
-        'parts': [{'text': prompt}]
-    })
-
-    st.session_state.gemini_history.append({
-        'role': 'model',
-        'parts': [{'text': full_response}]
-    })
+    # Update the Gemini history
+    st.session_state.gemini_history = st.session_state.puv_chat.history
 
     # Save to file
     joblib.dump(
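
For reference, a minimal standalone sketch of the pattern the added code relies on: a GenerativeModel created with a system_instruction, a chat started from dict-shaped history entries, and a streamed reply. This is a sketch under assumptions, not code from app.py: the API-key handling and the system prompt text are placeholders, and get_puv_system_prompt() is defined elsewhere and not shown in this diff.

import google.generativeai as genai

genai.configure(api_key='YOUR_API_KEY')  # assumption: the real app configures its key elsewhere

# Assumption: placeholder text; the actual prompt comes from get_puv_system_prompt()
system_prompt = 'You help users draft their Unique Value Proposition (Propuesta Única de Valor).'

model = genai.GenerativeModel(
    model_name='gemini-2.0-flash',
    system_instruction=system_prompt,
)

# History entries use the same {'role', 'parts'} shape the diff converts old messages into
history = [
    {'role': 'user', 'parts': [{'text': 'Hello'}]},
    {'role': 'model', 'parts': [{'text': 'Hi! How can I help with your value proposition?'}]},
]
chat = model.start_chat(history=history)

# stream=True returns an iterable of chunks; each chunk exposes a .text fragment
response = chat.send_message('Help me sharpen my value proposition', stream=True)
full_response = ''
for chunk in response:
    full_response += chunk.text
print(full_response)

Compared with the removed generate_content() call, send_message() on a persistent chat also carries the prior turns, so the model sees the whole conversation rather than only the latest prompt.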
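
And a sketch, under stated assumptions, of how the streamed reply and the history update at the end of the last hunk could fit together with the joblib save that follows it. The placeholder-based rendering, the data/ paths, and the chat_id-based file names are assumptions about parts of app.py not shown in this diff.

import joblib
import streamlit as st

# Assumption: 'response' is the streamed result of st.session_state.puv_chat.send_message(prompt, stream=True)
with st.chat_message('assistant'):
    placeholder = st.empty()
    full_response = ''
    for chunk in response:
        full_response += chunk.text
        placeholder.markdown(full_response + '▌')  # show partial text while streaming
    placeholder.markdown(full_response)

st.session_state.messages.append(dict(role='assistant', content=full_response))

# As in the diff: let the chat object own the canonical history instead of appending dicts by hand
st.session_state.gemini_history = st.session_state.puv_chat.history

# Save to file (assumption: data/ directory and chat_id-based names are defined elsewhere in app.py)
joblib.dump(st.session_state.messages, f'data/{st.session_state.chat_id}-st_messages')
joblib.dump(st.session_state.gemini_history, f'data/{st.session_state.chat_id}-gemini_messages')

Reading puv_chat.history back instead of rebuilding the list by hand avoids drifting out of sync with what the model actually saw during the conversation.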