Upload 11 files
README.md
CHANGED
@@ -1,11 +1,11 @@
 ---
 license: mit
-title:
+title: Chatbot_Gemini_Streamlit
 sdk: streamlit
 emoji: 🏆
 colorFrom: red
 colorTo: yellow
 pinned: true
 sdk_version: 1.45.0
-short_description:
+short_description: Transform your audience's thoughts into persuasive bullets
 ---
app.py
CHANGED
@@ -196,11 +196,9 @@ if prompt := st.chat_input('Describe tu producto/servicio y audiencia objetivo..
     # Save this as a chat for later
     if st.session_state.chat_id not in past_chats.keys():
         # Es una nueva conversación, generemos un título basado en el primer mensaje
-        # Primero, guardamos un título temporal
         temp_title = f'SesiónChat-{st.session_state.chat_id}'
         past_chats[st.session_state.chat_id] = temp_title
 
-        # Generamos un título basado en el contenido del mensaje
         try:
             # Usamos el mismo modelo para generar un título corto
             title_generator = genai.GenerativeModel('gemini-2.0-flash')
@@ -222,7 +220,7 @@ if prompt := st.chat_input('Describe tu producto/servicio y audiencia objetivo..
     else:
         # Ya existe esta conversación, usamos el título guardado
         st.session_state.chat_title = past_chats[st.session_state.chat_id]
-
+
     joblib.dump(past_chats, 'data/past_chats_list')
 
     # Display user message in chat message container
@@ -240,6 +238,16 @@ if prompt := st.chat_input('Describe tu producto/servicio y audiencia objetivo..
     # Construir el prompt para el modelo con todas las fórmulas disponibles
     puv_expert_prompt = """You are a collaborative team of world-class experts working together to create exceptional Unique Value Propositions (UVPs) that convert audience into customers.
 
+IMPORTANT: Always maintain a friendly and conversational tone. When users express gratitude or make casual comments:
+1. Acknowledge their message first (e.g., "¡De nada! Me alegra poder ayudarte.")
+2. Then continue with the conversation naturally
+3. Show personality and empathy in your responses
+4. Use emojis occasionally to add warmth 😊
+
+If the user says "gracias" or similar:
+- Respond warmly first
+- Then ask if they'd like to continue working on their PUV or if tienen alguna otra pregunta
+
 INTERNAL ANALYSIS (DO NOT OUTPUT):
 
 1. DEEP AVATAR ANALYSIS:
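The first two hunks tidy the chat-title path: a temporary title is stored, gemini-2.0-flash is asked for a short title, and the updated id-to-title mapping is persisted with joblib.dump. A minimal sketch of that flow, assuming the Gemini key is available in a GOOGLE_API_KEY environment variable; generate_session_title, load_past_chats, and the demo chat_id are illustrative names, not taken from the repository:

```python
# Minimal sketch of the chat-title flow touched by the first two hunks.
# generate_session_title, load_past_chats and the demo chat_id are
# illustrative helpers, not names taken from app.py.
import os
import joblib
import google.generativeai as genai

# The real app configures the key elsewhere; shown here so the sketch runs standalone.
genai.configure(api_key=os.environ['GOOGLE_API_KEY'])


def load_past_chats(path: str = 'data/past_chats_list') -> dict:
    # past_chats maps chat_id -> human-readable session title
    try:
        return joblib.load(path)
    except FileNotFoundError:
        return {}


def generate_session_title(chat_id: str, first_message: str) -> str:
    # Temporary title mirrors the f'SesiónChat-{chat_id}' pattern in the diff
    temp_title = f'SesiónChat-{chat_id}'
    try:
        # Same model the diff uses to produce a short title
        title_generator = genai.GenerativeModel('gemini-2.0-flash')
        response = title_generator.generate_content(
            'Resume este mensaje en un título de cinco palabras o menos: '
            + first_message
        )
        return response.text.strip() or temp_title
    except Exception:
        # Keep the temporary title if the title request fails
        return temp_title


os.makedirs('data', exist_ok=True)
past_chats = load_past_chats()
chat_id = 'demo-chat-id'  # the app derives this from st.session_state
if chat_id not in past_chats:
    past_chats[chat_id] = generate_session_title(
        chat_id, 'Vendo cursos de cocina para principiantes'
    )
joblib.dump(past_chats, 'data/past_chats_list')
```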
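The third hunk only extends the puv_expert_prompt string with tone-of-voice rules ahead of the INTERNAL ANALYSIS section. A sketch of how such a prefix might be sent together with a user turn in a Gemini chat session; prepending the prompt to the first message is an assumption about how app.py wires it up, and the prompt text is abbreviated here:

```python
# Sketch of combining the extended expert prompt with a user turn.
# The prompt text is abbreviated, and prepending it to the first message
# is an assumption about how app.py uses it, not something shown in this diff.
import os
import google.generativeai as genai

genai.configure(api_key=os.environ['GOOGLE_API_KEY'])

puv_expert_prompt = (
    'You are a collaborative team of world-class experts working together to '
    'create exceptional Unique Value Propositions (UVPs) that convert audience '
    'into customers.\n\n'
    'IMPORTANT: Always maintain a friendly and conversational tone. '
    'When users express gratitude or make casual comments, acknowledge them '
    'first, then continue with the conversation naturally.\n'
)

model = genai.GenerativeModel('gemini-2.0-flash')
chat = model.start_chat(history=[])

user_message = 'Gracias, ¿puedes ayudarme con la PUV de mi tienda de velas?'
response = chat.send_message(f'{puv_expert_prompt}\nUser message: {user_message}')
print(response.text)
```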