Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,3 +1,4 @@
|
|
|
|
1 |
import gradio as gr
|
2 |
from openai import OpenAI
|
3 |
import openai # Import top-level for error types
|
@@ -27,7 +28,7 @@ OPENROUTER_TEXT_MODEL = "google/gemini-pro-1.5" # Modèle OpenRouter par défaut
|
|
27 |
OPENAI_TEXT_MODEL = "gpt-4o-mini" # ou "gpt-4o"
|
28 |
OPENAI_IMAGE_MODEL = "dall-e-3"
|
29 |
|
30 |
-
# ---
|
31 |
class BiasInfo(BaseModel):
|
32 |
bias_type: str = Field(..., description="Type de biais identifié (ex: Stéréotype de genre, Biais de confirmation)")
|
33 |
explanation: str = Field(..., description="Explication de pourquoi cela pourrait être un biais dans ce contexte.")
|
@@ -37,8 +38,8 @@ class BiasAnalysisResponse(BaseModel):
|
|
37 |
detected_biases: list[BiasInfo] = Field(default_factory=list, description="Liste des biais potentiels détectés.")
|
38 |
overall_comment: str = Field(default="", description="Commentaire général ou indication si aucun biais majeur n'est détecté.")
|
39 |
|
40 |
-
# --- Fonctions Utilitaires (
|
41 |
-
|
42 |
# Dictionnaires de correspondance (Inchangés)
|
43 |
posture_mapping = {"": "","Debout": "standing up","Assis": "sitting","Allongé": "lying down","Accroupi": "crouching","En mouvement": "moving","Reposé": "resting"}
|
44 |
facial_expression_mapping = {"": "","Souriant": "smiling","Sérieux": "serious","Triste": "sad","En colère": "angry","Surpris": "surprised","Pensif": "thoughtful"}
|
@@ -73,35 +74,98 @@ def clean_json_response(raw_response):
|
|
73 |
if match:
|
74 |
return match.group(1)
|
75 |
# Recherche d'un objet JSON commençant par { et finissant par }
|
76 |
-
|
77 |
-
|
78 |
-
|
|
|
|
|
79 |
try:
|
80 |
-
json.loads(
|
81 |
-
return
|
82 |
except json.JSONDecodeError:
|
83 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
84 |
|
85 |
-
#
|
86 |
return raw_response.strip()
|
87 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
88 |
# --- Fonctions Principales de l'Application (Mises à jour) ---
|
89 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
90 |
def analyze_biases_v2(app_config, objective_text, session_log_state):
|
91 |
"""Analyse les biais dans l'objectif marketing (utilise le client API actif)."""
|
92 |
log = session_log_state
|
93 |
log = update_log(f"Analyse Biais Objectif (début): '{objective_text[:50]}...'", log)
|
94 |
|
95 |
if not objective_text:
|
96 |
-
|
97 |
|
98 |
-
|
99 |
-
|
|
|
|
|
100 |
|
101 |
-
active_client = app_config["client"]
|
102 |
model_name = app_config["text_model"]
|
103 |
|
104 |
-
#
|
105 |
system_prompt = f"""
|
106 |
Tu es un expert en marketing éthique et en psychologie cognitive, spécialisé dans la création de personas.
|
107 |
Analyse l'objectif marketing suivant : "{objective_text}"
|
@@ -131,60 +195,63 @@ def analyze_biases_v2(app_config, objective_text, session_log_state):
|
|
131 |
Réponds en français. S'il n'y a pas de biais clair, retourne une liste 'detected_biases' vide et indique-le dans 'overall_comment'.
|
132 |
"""
|
133 |
|
134 |
-
response_content_str = "" # Init
|
135 |
try:
|
136 |
completion = active_client.chat.completions.create(
|
137 |
model=model_name,
|
138 |
messages=[
|
139 |
-
|
140 |
-
# Ici, on le met dans le system_prompt.
|
141 |
-
{"role": "user", "content": system_prompt}
|
142 |
],
|
143 |
temperature=0.4,
|
144 |
max_tokens=800,
|
145 |
-
# Demander explicitement du JSON si le modèle le supporte bien (OpenAI et certains OpenRouter le font)
|
146 |
response_format={"type": "json_object"},
|
147 |
)
|
148 |
|
149 |
response_content_str = completion.choices[0].message.content
|
150 |
-
# Nettoyage de la réponse avant parsing
|
151 |
cleaned_response_str = clean_json_response(response_content_str)
|
152 |
|
153 |
-
#
|
154 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
155 |
|
156 |
-
log = update_log(f"Analyse Biais Objectif (fin): Biais trouvés - {len(parsed_response.detected_biases)}", log)
|
157 |
-
return parsed_response.dict(), log
|
158 |
|
159 |
except openai.AuthenticationError as e:
|
160 |
-
|
161 |
-
|
162 |
-
|
163 |
-
|
164 |
except openai.RateLimitError as e:
|
165 |
-
|
166 |
-
|
167 |
-
|
168 |
-
|
169 |
except Exception as e:
|
170 |
error_msg = f"Erreur pendant l'analyse des biais: {str(e)}. Réponse brute: '{response_content_str[:200]}...'"
|
171 |
print(error_msg)
|
172 |
-
log = update_log(f"ERREUR Analyse Biais: {str(e)}", log)
|
173 |
-
#
|
174 |
return BiasAnalysisResponse(overall_comment=f"Erreur technique lors de l'analyse: {str(e)}").dict(), log
|
175 |
|
|
|
176 |
def display_bias_analysis_v2(analysis_result):
|
177 |
"""Formate l'analyse des biais pour l'affichage avec HighlightedText."""
|
178 |
# Prend directement le dict retourné par analyze_biases_v2
|
179 |
if not analysis_result:
|
180 |
-
return [("Aucune analyse effectuée.", None)]
|
181 |
|
182 |
biases = analysis_result.get("detected_biases", [])
|
183 |
overall_comment = analysis_result.get("overall_comment", "")
|
184 |
|
185 |
highlighted_data = []
|
186 |
if "Erreur" in overall_comment:
|
187 |
-
|
188 |
elif not biases:
|
189 |
highlighted_data.append((overall_comment or "Aucun biais majeur détecté.", "INFO"))
|
190 |
else:
|
@@ -195,10 +262,10 @@ def display_bias_analysis_v2(analysis_result):
|
|
195 |
highlighted_data.append((f"{bias_info.get('explanation', 'Pas d’explication.')}\n", "EXPLANATION"))
|
196 |
highlighted_data.append((f"💡 Conseil: {bias_info.get('advice', 'Pas de conseil.')}\n", "ADVICE"))
|
197 |
|
198 |
-
# Retourne les données formatées
|
199 |
-
# Le second output (bias_analysis_result_state) est mis à jour par la fonction appelante si nécessaire
|
200 |
return highlighted_data
|
201 |
|
|
|
202 |
def generate_persona_image_v2(app_config, *args):
|
203 |
"""Génère l'image du persona en utilisant OpenAI si activé, sinon retourne None."""
|
204 |
# Les 13 premiers args sont les inputs de l'image, le dernier est session_log_state
|
@@ -212,21 +279,22 @@ def generate_persona_image_v2(app_config, *args):
|
|
212 |
# Vérifier si la génération d'image est activée (nécessite clé OpenAI valide)
|
213 |
if not app_config.get("image_generation_enabled", False):
|
214 |
log = update_log("Génération Image: Désactivée (Clé API OpenAI non fournie/valide).", log)
|
215 |
-
# Retourne None pour l'image et le log mis à jour.
|
216 |
-
# On ajoute aussi un message utilisateur via gr.Info dans le .click
|
217 |
return None, log, "Génération d'image désactivée. Veuillez fournir une clé API OpenAI valide dans l'onglet Configuration."
|
218 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
219 |
# Vérifier les champs obligatoires
|
220 |
if not first_name or not last_name or not age or not gender:
|
221 |
-
# Ne pas générer si infos de base manquantes
|
222 |
-
# Le message utilisateur sera géré dans le .click via gr.Info
|
223 |
return None, log, "Veuillez remplir Prénom, Nom, Âge et Genre pour générer l'image."
|
224 |
|
225 |
-
#
|
226 |
-
# Base
|
227 |
prompt_parts = [f"one person only, close-up portrait photo of {first_name} {last_name}, a {gender} aged {age}."] # Préciser "photo", "portrait"
|
228 |
-
|
229 |
-
# Détails (utilise les mappings pour traduire les choix FR en termes EN)
|
230 |
if skin_color_mapping.get(skin_color): prompt_parts.append(f"Skin tone: {skin_color_mapping[skin_color]}.")
|
231 |
if eye_color_mapping.get(eye_color): prompt_parts.append(f"Eye color: {eye_color_mapping[eye_color]}.")
|
232 |
if hair_style_mapping.get(hair_style): prompt_parts.append(f"Hairstyle: {hair_style_mapping[hair_style]}.")
|
@@ -236,20 +304,11 @@ def generate_persona_image_v2(app_config, *args):
|
|
236 |
if clothing_style_mapping.get(clothing_style): prompt_parts.append(f"Clothing style: {clothing_style_mapping[clothing_style]}.")
|
237 |
if accessories_mapping.get(accessories): prompt_parts.append(f"Wearing: {accessories_mapping[accessories]}.") # "Wearing" est souvent mieux pour les accessoires
|
238 |
if persona_description_en: prompt_parts.append(f"Background or context: {persona_description_en}.")
|
239 |
-
|
240 |
-
# Style final
|
241 |
prompt_parts.append("Realistic photo style, high detail, natural lighting.")
|
242 |
final_prompt = " ".join(prompt_parts)
|
243 |
|
244 |
log = update_log(f"Génération Image (début): Prompt='{final_prompt[:100]}...'", log)
|
245 |
|
246 |
-
# Utiliser le client OpenAI (car image_generation_enabled est True)
|
247 |
-
openai_client = app_config.get("client") # Devrait être le client OpenAI ici
|
248 |
-
if not isinstance(openai_client, OpenAI) or app_config.get("api_source") != "openai":
|
249 |
-
error_msg = "Erreur interne: Tentative de génération d'image sans client OpenAI valide."
|
250 |
-
log = update_log(f"ERREUR Génération Image: {error_msg}", log)
|
251 |
-
return None, log, error_msg
|
252 |
-
|
253 |
try:
|
254 |
response = openai_client.images.generate(
|
255 |
model=OPENAI_IMAGE_MODEL,
|
@@ -261,79 +320,70 @@ def generate_persona_image_v2(app_config, *args):
|
|
261 |
style="natural" # ou "vivid"
|
262 |
)
|
263 |
|
264 |
-
# Traitement de la réponse (URL ou B64)
|
265 |
image_url = response.data[0].url
|
266 |
-
# Alternative si b64_json:
|
267 |
-
# img_b64 = response.data[0].b64_json
|
268 |
-
# img_bytes = base64.b64decode(img_b64)
|
269 |
-
# pil_image = Image.open(io.BytesIO(img_bytes))
|
270 |
-
|
271 |
-
# Télécharger l'image depuis l'URL
|
272 |
img_response = requests.get(image_url)
|
273 |
img_response.raise_for_status() # Vérifie les erreurs HTTP
|
274 |
-
|
275 |
-
# Ouvrir l'image avec PIL depuis les bytes téléchargés
|
276 |
pil_image = Image.open(io.BytesIO(img_response.content))
|
277 |
|
278 |
log = update_log("Génération Image (fin): Succès.", log)
|
279 |
-
|
280 |
-
return pil_image, log, None
|
281 |
|
|
|
282 |
except openai.AuthenticationError as e:
|
283 |
-
|
284 |
-
|
285 |
-
|
286 |
-
|
287 |
except openai.RateLimitError as e:
|
288 |
-
|
289 |
-
|
290 |
-
|
291 |
-
|
292 |
except openai.BadRequestError as e: # Erreur fréquente si le prompt est refusé
|
293 |
-
|
294 |
-
|
295 |
-
|
296 |
-
|
297 |
except Exception as e:
|
298 |
error_msg = f"Erreur lors de la génération de l'image: {str(e)}"
|
299 |
print(error_msg)
|
300 |
log = update_log(f"ERREUR Génération Image: {str(e)}", log)
|
301 |
return None, log, error_msg
|
302 |
|
303 |
-
|
|
|
304 |
"""Affine les détails du persona (utilise le client API actif)."""
|
|
|
305 |
log = session_log_state
|
306 |
log = update_log(f"Refinement (début): Champ='{field_name}', Valeur initiale='{field_value[:50]}...'", log)
|
307 |
|
308 |
-
#
|
309 |
-
|
310 |
-
|
311 |
-
log = update_log(f"ERREUR Refinement:
|
312 |
-
|
313 |
-
return log, error_msg
|
314 |
|
315 |
-
active_client = app_config["client"]
|
316 |
model_name = app_config["text_model"]
|
317 |
|
318 |
-
#
|
319 |
biases_text = "Aucune analyse de biais précédente disponible ou chargée."
|
320 |
-
if
|
321 |
try:
|
322 |
-
|
323 |
-
detected_biases = bias_analysis_json_str.get("detected_biases", [])
|
324 |
if detected_biases:
|
325 |
biases_text = "\n".join([f"- {b.get('bias_type','N/A')}: {b.get('explanation','N/A')}" for b in detected_biases])
|
326 |
else:
|
327 |
-
biases_text = "Aucun biais majeur détecté lors de l'analyse initiale."
|
328 |
except Exception as e:
|
329 |
-
biases_text = f"Erreur lors de la lecture des biais analysés: {e}"
|
330 |
-
log = update_log(f"ERREUR Lecture Biais pour Refinement: {e}", log)
|
|
|
331 |
|
332 |
-
# Prompt
|
333 |
system_prompt = f"""
|
334 |
Tu es un assistant IA expert en marketing éthique, aidant à affiner le persona marketing pour '{first_name} {last_name}' ({age} ans).
|
335 |
L'objectif marketing initial était : "{marketing_objectives}"
|
336 |
-
L'analyse initiale de cet objectif a soulevé les
|
337 |
{biases_text}
|
338 |
|
339 |
Tâche: Concentre-toi UNIQUEMENT sur le champ '{field_name}' dont la valeur actuelle est '{field_value}'.
|
@@ -347,7 +397,7 @@ def refine_persona_details_v2(app_config, first_name, last_name, age, field_name
|
|
347 |
Si la valeur actuelle semble bonne ou si tu manques de contexte pour faire une suggestion pertinente, indique-le simplement (ex: "La valeur actuelle semble appropriée." ou "Difficile de suggérer sans plus de contexte.").
|
348 |
Réponds en français. Ne fournis QUE les suggestions ou le commentaire d'approbation/manque de contexte. Ne répète pas la question.
|
349 |
"""
|
350 |
-
suggestions = "" # Init
|
351 |
try:
|
352 |
response = active_client.chat.completions.create(
|
353 |
model=model_name,
|
@@ -358,25 +408,27 @@ def refine_persona_details_v2(app_config, first_name, last_name, age, field_name
|
|
358 |
suggestions = response.choices[0].message.content.strip()
|
359 |
|
360 |
log = update_log(f"Refinement (fin): Champ='{field_name}'. Suggestions: '{suggestions[:50]}...'", log)
|
361 |
-
#
|
362 |
return log, suggestions
|
363 |
|
|
|
364 |
except openai.AuthenticationError as e:
|
365 |
-
|
366 |
-
|
367 |
-
|
368 |
-
|
369 |
except openai.RateLimitError as e:
|
370 |
-
|
371 |
-
|
372 |
-
|
373 |
-
|
374 |
except Exception as e:
|
375 |
error_msg = f"Erreur lors du raffinement pour '{field_name}': {str(e)}"
|
376 |
print(error_msg)
|
377 |
log = update_log(f"ERREUR Refinement '{field_name}': {str(e)}", log)
|
378 |
return log, f"ERREUR: {error_msg}"
|
379 |
|
|
|
380 |
def generate_summary_v2(*args):
|
381 |
"""Génère le résumé HTML du persona (gestion image PIL)."""
|
382 |
# Le dernier arg est session_log_state, l'avant-dernier est persona_image (PIL ou None)
|
@@ -413,10 +465,15 @@ def generate_summary_v2(*args):
|
|
413 |
# Convertir l'image PIL en base64 pour l'intégrer directement
|
414 |
buffered = io.BytesIO()
|
415 |
# Sauvegarder en PNG (ou JPEG si préféré) dans le buffer mémoire
|
416 |
-
|
|
|
|
|
|
|
|
|
|
|
417 |
img_bytes = buffered.getvalue()
|
418 |
img_base64 = base64.b64encode(img_bytes).decode()
|
419 |
-
img_data_url = f"data:image/
|
420 |
image_html += f"<img src='{img_data_url}' alt='Persona {first_name}' style='max-width: 300px; height: auto; border: 1px solid #eee; border-radius: 5px; margin-top: 10px;'/>\n"
|
421 |
except Exception as e:
|
422 |
img_err_msg = f"Erreur encodage image: {e}"
|
@@ -427,9 +484,6 @@ def generate_summary_v2(*args):
|
|
427 |
|
428 |
# Section Informations Personnelles (Titre centré)
|
429 |
summary += f"<div style='text-align: center;'><h1>{first_name} {last_name}, {age} ans ({gender})</h1></div>\n"
|
430 |
-
# Utiliser persona_description_en ici car c'est ce qui a été utilisé pour l'image, ou ajouter un champ description générale FR ?
|
431 |
-
# Pour l'instant, on affiche pas la desc EN dans le résumé FR. Ajoutons un champ 'description_persona_fr' ?
|
432 |
-
# Solution simple: ne pas afficher de description ici ou ajouter un nouveau champ.
|
433 |
# summary += f"<p><i>{persona_description_en}</i></p>\n" # Commenté
|
434 |
|
435 |
# Assemblage des autres sections (avec vérification si champ rempli)
|
@@ -437,18 +491,27 @@ def generate_summary_v2(*args):
|
|
437 |
content = ""
|
438 |
for label, value in fields.items():
|
439 |
# N'ajoute que si la valeur existe (n'est pas None, False, 0, ou chaîne vide)
|
440 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
441 |
# Formatage spécial pour les revenus
|
442 |
if label == "Revenus annuels (€)" and isinstance(value, (int, float)):
|
443 |
# Format numérique avec séparateur de milliers (espace)
|
444 |
try:
|
445 |
-
|
|
|
446 |
except ValueError: # Gère le cas où income serait une chaîne ou autre chose
|
447 |
-
value_str = str(value) + "
|
448 |
else:
|
449 |
value_str = str(value)
|
450 |
-
# Remplace les sauts de ligne par <br> pour l'affichage HTML
|
451 |
-
value_str_html = value_str.replace("\n", "<br>")
|
452 |
content += f"<b>{label}:</b> {value_str_html}<br>\n"
|
453 |
if content:
|
454 |
# Ajoute un peu d'espace avant la section
|
@@ -472,30 +535,31 @@ def generate_summary_v2(*args):
|
|
472 |
"Scénarios d’utilisation typiques": usage_scenarios
|
473 |
})
|
474 |
summary += add_section("Contexte Professionnel/Vie Quotidienne", {
|
475 |
-
|
476 |
-
|
477 |
-
|
478 |
})
|
479 |
summary += add_section("Marketing & Considérations Spéciales", {
|
480 |
-
|
481 |
-
|
482 |
-
|
483 |
-
|
484 |
-
|
485 |
-
|
486 |
})
|
487 |
|
488 |
image_html += "</div>\n" # Ferme div image
|
489 |
|
490 |
-
# Assemblage final avec flexbox
|
491 |
final_html = "<div style='display: flex; flex-wrap: wrap; align-items: flex-start; font-family: sans-serif; padding: 10px;'>\n"
|
492 |
final_html += f"<div style='flex: 1; min-width: 350px; padding-right: 15px;'>\n{summary}</div>\n" # Colonne texte
|
493 |
final_html += image_html # Colonne image
|
494 |
final_html += "</div>"
|
495 |
|
496 |
-
#
|
497 |
return final_html, log
|
498 |
|
|
|
499 |
# --- Interface Gradio V2 (Mise à jour avec BYOK et suggestions) ---
|
500 |
|
501 |
with gr.Blocks(theme=gr.themes.Glass()) as demo:
|
@@ -503,18 +567,18 @@ with gr.Blocks(theme=gr.themes.Glass()) as demo:
|
|
503 |
gr.Markdown("Outil d'aide à la création de personas, intégrant l'IA générative (OpenRouter ou OpenAI) pour stimuler la créativité et la réflexivité face aux biais.")
|
504 |
|
505 |
# --- État Global Partagé ---
|
506 |
-
# Stocke la configuration active (
|
507 |
app_config_state = gr.State(value={
|
508 |
-
"client": None,
|
509 |
"api_source": None, # 'openai' or 'openrouter'
|
510 |
"text_model": None,
|
511 |
"image_generation_enabled": False,
|
512 |
-
"openai_key_provided": False,
|
513 |
"openrouter_key_provided": bool(openrouter_api_key)
|
514 |
})
|
515 |
-
# Stocke le résultat de l'analyse de biais (
|
516 |
bias_analysis_result_state = gr.State(value={})
|
517 |
-
# Stocke l'image générée (objet PIL ou None)
|
518 |
persona_image_pil_state = gr.State(value=None)
|
519 |
# Stocke le log de session (chaîne de caractères)
|
520 |
session_log_state = gr.State(value="")
|
@@ -528,7 +592,9 @@ with gr.Blocks(theme=gr.themes.Glass()) as demo:
|
|
528 |
def update_status_display(new_message, current_log):
|
529 |
# Met aussi à jour le log si un message est affiché
|
530 |
if new_message:
|
531 |
-
|
|
|
|
|
532 |
return new_message, current_log
|
533 |
|
534 |
# --- Onglets ---
|
@@ -543,99 +609,138 @@ with gr.Blocks(theme=gr.themes.Glass()) as demo:
|
|
543 |
if openrouter_api_key:
|
544 |
gr.Markdown("✅ Clé API **OpenRouter** trouvée dans l'environnement (`OPENROUTER_API_KEY`).")
|
545 |
else:
|
546 |
-
gr.Markdown("❌ **Clé API OpenRouter (`OPENROUTER_API_KEY`) non trouvée.**
|
547 |
|
548 |
# Champ pour la clé OpenAI (optionnelle)
|
549 |
openai_api_key_input = gr.Textbox(
|
550 |
label="Clé API OpenAI (Optionnelle)",
|
551 |
type="password",
|
552 |
placeholder="Entrez votre clé OpenAI ici pour activer DALL-E 3 et utiliser OpenAI pour le texte",
|
553 |
-
info="Si fournie, cette clé sera utilisée pour la génération d'images (DALL-E 3) ET pour l'analyse/raffinement de texte (GPT). Sinon, OpenRouter sera utilisé pour le texte et la génération d'images sera désactivée."
|
554 |
)
|
555 |
# Bouton pour appliquer la config (initialise les clients)
|
556 |
configure_api_button = gr.Button("Appliquer la Configuration API")
|
557 |
# Affichage du statut de la configuration active
|
558 |
api_status_display = gr.Markdown("Statut API: Non configuré.")
|
559 |
|
560 |
-
# Fonction de configuration des clients API
|
561 |
def configure_api_clients(openai_key, current_config, current_log):
|
562 |
openai_key_provided = bool(openai_key)
|
563 |
openrouter_key_available = current_config["openrouter_key_provided"]
|
564 |
status_msg = ""
|
565 |
config = current_config.copy() # Copie pour modification
|
566 |
|
567 |
-
client
|
|
|
|
|
|
|
568 |
api_source = None
|
569 |
text_model = None
|
570 |
image_enabled = False
|
|
|
571 |
|
572 |
# Priorité à OpenAI si clé fournie
|
573 |
if openai_key_provided:
|
574 |
try:
|
575 |
-
|
576 |
-
#
|
577 |
-
|
|
|
|
|
|
|
|
|
578 |
api_source = "openai"
|
579 |
text_model = OPENAI_TEXT_MODEL
|
580 |
image_enabled = True
|
581 |
status_msg = f"✅ Configuration **OpenAI** active (Modèle texte: `{text_model}`, Images: DALL-E 3 activé)."
|
582 |
config["openai_key_provided"] = True
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
583 |
except Exception as e:
|
584 |
-
status_msg = f"⚠️ Clé OpenAI fournie mais
|
585 |
log_msg = f"ERREUR API Config OpenAI: {e}"
|
586 |
current_log = update_log(log_msg, current_log)
|
587 |
print(log_msg)
|
588 |
-
# Reset OpenAI specific flags
|
589 |
config["openai_key_provided"] = False
|
590 |
-
openai_key_provided = False # Force fallback
|
591 |
|
592 |
# Fallback vers OpenRouter si clé OpenAI non fournie ou invalide, ET si clé OpenRouter existe
|
593 |
-
if
|
594 |
-
|
595 |
-
|
596 |
-
|
597 |
-
|
598 |
-
|
599 |
-
|
600 |
-
|
601 |
-
|
602 |
-
|
603 |
-
|
604 |
-
|
605 |
-
|
606 |
-
|
607 |
-
|
608 |
-
|
609 |
-
|
610 |
-
else:
|
611 |
-
status_msg = "❌ Aucune clé API valide (OpenAI ou OpenRouter) n'est configurée. L'application ne peut pas fonctionner."
|
612 |
-
client = None # Assure que le client est None
|
613 |
|
614 |
-
|
615 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
616 |
config["api_source"] = api_source
|
617 |
config["text_model"] = text_model
|
618 |
config["image_generation_enabled"] = image_enabled
|
619 |
|
620 |
-
log_msg = f"Configuration API appliquée. Source: {api_source or 'Aucune'}, Images: {'Actif' if image_enabled else 'Inactif'}."
|
621 |
-
|
|
|
|
|
622 |
|
623 |
-
#
|
624 |
-
#
|
625 |
-
return config, status_msg, current_log
|
626 |
|
627 |
-
# Lier le bouton de configuration
|
628 |
-
# La sortie met à jour : l'état de config, le markdown de statut, l'état du log,
|
629 |
-
# et l'interactivité du bouton de génération d'image (qui est dans un autre onglet)
|
630 |
-
# On a besoin de référencer le bouton de génération d'image ici. Il faut le définir avant.
|
631 |
-
# Solution : On va plutôt mettre à jour l'état `app_config_state`, et le bouton d'image
|
632 |
-
# lira cet état quand il sera cliqué. La désactivation visuelle se fera via un .change() sur l'état.
|
633 |
|
|
|
|
|
634 |
configure_api_button.click(
|
635 |
fn=configure_api_clients,
|
636 |
inputs=[openai_api_key_input, app_config_state, session_log_state],
|
637 |
outputs=[app_config_state, api_status_display, session_log_state]
|
638 |
-
# L'interactivité du bouton image sera gérée séparément via un .change()
|
639 |
)
|
640 |
|
641 |
# --- Onglet 1 : Objectif & Analyse Biais ---
|
@@ -676,22 +781,24 @@ with gr.Blocks(theme=gr.themes.Glass()) as demo:
|
|
676 |
analyze_button.click(
|
677 |
fn=analyze_biases_v2,
|
678 |
inputs=[app_config_state, objective_input, session_log_state],
|
679 |
-
outputs=[bias_analysis_result_state, session_log_state] #
|
680 |
).then(
|
681 |
fn=display_bias_analysis_v2,
|
682 |
-
inputs=bias_analysis_result_state, #
|
683 |
-
outputs=bias_analysis_output_highlighted #
|
684 |
).then(
|
685 |
-
|
686 |
-
|
687 |
-
|
688 |
-
|
689 |
)
|
690 |
|
691 |
# Action du bouton Enregistrer Réflexion
|
692 |
def log_user_reflection(reflection_text, log_state):
|
693 |
-
|
694 |
-
|
|
|
|
|
695 |
log_reflection_button.click(
|
696 |
fn=log_user_reflection,
|
697 |
inputs=[user_reflection_on_biases, session_log_state],
|
@@ -703,7 +810,8 @@ with gr.Blocks(theme=gr.themes.Glass()) as demo:
|
|
703 |
with gr.Tab("👤 Étape 2: Image & Infos Base", id=1):
|
704 |
gr.Markdown("### 2. Créez l'identité visuelle et les informations de base")
|
705 |
with gr.Row():
|
706 |
-
|
|
|
707 |
first_name_input = gr.Textbox(label="Prénom")
|
708 |
last_name_input = gr.Textbox(label="Nom")
|
709 |
age_input = gr.Slider(label="Âge", minimum=18, maximum=100, step=1, value=30)
|
@@ -726,11 +834,11 @@ with gr.Blocks(theme=gr.themes.Glass()) as demo:
|
|
726 |
reset_visuals_button = gr.Button("Réinitialiser Détails Visuels", size="sm")
|
727 |
|
728 |
with gr.Column(scale=1): # Colonne de droite pour l'image et le bouton
|
729 |
-
|
730 |
persona_image_output = gr.Image(label="Image du Persona", type="pil", height=400, interactive=False) # Non éditable par l'utilisateur
|
731 |
-
#
|
732 |
-
generate_image_button = gr.Button("🖼️ Générer / Mettre à jour l'Image")
|
733 |
-
gr.Markdown("<small>💡 **Attention :** Les IA génératrices d'images peuvent reproduire des stéréotypes. Utilisez les détails visuels avec discernement
|
734 |
|
735 |
# Logique de l'onglet 2
|
736 |
visual_inputs = [
|
@@ -740,73 +848,79 @@ with gr.Blocks(theme=gr.themes.Glass()) as demo:
|
|
740 |
reset_visuals_button.click(lambda: [""] * len(visual_inputs), outputs=visual_inputs)
|
741 |
|
742 |
# Action du bouton Générer Image
|
743 |
-
#
|
744 |
def handle_image_generation(*args):
|
745 |
-
#
|
746 |
-
#
|
747 |
app_config = args[0]
|
748 |
log_state = args[-1]
|
749 |
persona_inputs = args[1:-1] # first_name, last_name, etc.
|
750 |
|
|
|
751 |
pil_image, updated_log, error_message = generate_persona_image_v2(app_config, *persona_inputs, log_state)
|
752 |
|
753 |
-
#
|
754 |
-
|
755 |
-
|
756 |
-
|
757 |
-
|
758 |
-
|
|
|
|
|
|
|
759 |
|
760 |
-
#
|
761 |
-
if
|
762 |
-
|
763 |
-
status_update = "" # Ne pas mettre dans le statut global
|
764 |
|
765 |
-
|
|
|
766 |
|
|
|
767 |
generate_image_button.click(
|
768 |
fn=handle_image_generation,
|
769 |
-
inputs=[app_config_state] + [ #
|
770 |
first_name_input, last_name_input, age_input, gender_input, persona_description_en_input,
|
771 |
skin_color_input, eye_color_input, hair_style_input, hair_color_input,
|
772 |
facial_expression_input, posture_input, clothing_style_input, accessories_input,
|
773 |
-
session_log_state #
|
774 |
],
|
775 |
outputs=[
|
776 |
-
persona_image_pil_state, #
|
777 |
-
session_log_state,
|
778 |
-
status_message_state
|
779 |
]
|
780 |
-
).then( #
|
781 |
-
|
782 |
-
|
783 |
-
|
784 |
-
).then( #
|
785 |
-
|
786 |
-
|
787 |
-
|
788 |
)
|
789 |
|
790 |
-
#
|
791 |
app_config_state.change(
|
792 |
-
|
793 |
-
|
794 |
-
|
795 |
)
|
796 |
|
|
|
797 |
# --- Onglet 3 : Profil Détaillé & Raffinement IA ---
|
798 |
with gr.Tab("📝 Étape 3: Profil Détaillé & Raffinement IA", id=2):
|
799 |
gr.Markdown("### 3. Complétez les détails du persona")
|
800 |
gr.Markdown("Remplissez les champs suivants. Utilisez le bouton '💡 Affiner' pour obtenir des suggestions de l'IA visant à améliorer le champ spécifique, en tenant compte de votre objectif initial et des biais potentiels identifiés.")
|
801 |
|
802 |
-
#
|
803 |
with gr.Row():
|
804 |
with gr.Column():
|
805 |
gr.Markdown("#### Infos Socio-Démographiques")
|
806 |
marital_status_input = gr.Dropdown(label="État civil", choices=["", "Célibataire", "En couple", "Marié(e)", "Divorcé(e)", "Veuf(ve)"])
|
807 |
education_level_input = gr.Dropdown(label="Niveau d'éducation", choices=["", "Études secondaires", "Baccalauréat", "Licence", "Master", "Doctorat", "Autre"])
|
808 |
profession_input = gr.Textbox(label="Profession")
|
809 |
-
income_input = gr.Number(label="Revenus annuels (€)", minimum=0, step=1000)
|
810 |
|
811 |
gr.Markdown("#### Psychographie")
|
812 |
with gr.Row(equal_height=False):
|
@@ -840,7 +954,7 @@ with gr.Blocks(theme=gr.themes.Glass()) as demo:
|
|
840 |
usage_scenarios_input = gr.Textbox(label="Scénarios d'utilisation typiques", lines=2, scale=4)
|
841 |
refine_usage_scenarios_button = gr.Button("💡 Affiner", scale=1, size='sm')
|
842 |
|
843 |
-
#
|
844 |
with gr.Accordion("Autres Informations (Optionnel)", open=False):
|
845 |
with gr.Row():
|
846 |
with gr.Column():
|
@@ -877,40 +991,42 @@ with gr.Blocks(theme=gr.themes.Glass()) as demo:
|
|
877 |
refine_references_button = gr.Button("💡 Affiner", scale=1, size='sm')
|
878 |
|
879 |
|
880 |
-
#
|
881 |
-
def handle_refinement_request(app_config, fname, lname, age_val, field_name_display, field_val,
|
882 |
-
#
|
883 |
-
updated_log, result = refine_persona_details_v2(app_config, fname, lname, age_val, field_name_display, field_val,
|
884 |
-
|
885 |
-
|
886 |
-
|
887 |
-
|
888 |
-
|
889 |
-
|
890 |
-
|
891 |
-
|
892 |
-
|
893 |
-
|
|
|
|
|
894 |
else:
|
895 |
-
#
|
896 |
-
|
897 |
-
gr.Warning(f"Pas de suggestion reçue pour '{field_name_display}'.")
|
898 |
|
899 |
-
return updated_log,
|
900 |
|
901 |
-
#
|
902 |
def create_refine_handler(field_name_display, input_component):
|
903 |
-
|
904 |
return lambda app_conf, fname, lname, age_val, field_val, bias_state, objectives, log_state: \
|
905 |
handle_refinement_request(app_conf, fname, lname, age_val, field_name_display, field_val, bias_state, objectives, log_state)
|
906 |
|
907 |
-
#
|
908 |
common_inputs_refine = [app_config_state, first_name_input, last_name_input, age_input]
|
|
|
909 |
state_inputs_refine = [bias_analysis_result_state, objective_input, session_log_state]
|
910 |
-
#
|
911 |
common_outputs_refine = [session_log_state, status_message_state]
|
912 |
|
913 |
-
#
|
914 |
refine_buttons_map = {
|
915 |
refine_personality_traits_button: ("Traits de personnalité", personality_traits_input),
|
916 |
refine_values_beliefs_button: ("Valeurs et croyances", values_beliefs_input),
|
@@ -937,23 +1053,23 @@ with gr.Blocks(theme=gr.themes.Glass()) as demo:
|
|
937 |
fn=create_refine_handler(label, input_comp),
|
938 |
inputs=common_inputs_refine + [input_comp] + state_inputs_refine,
|
939 |
outputs=common_outputs_refine
|
940 |
-
|
941 |
-
fn=
|
942 |
inputs=[status_message_state, session_log_state],
|
943 |
outputs=[status_display, session_log_state]
|
944 |
-
|
945 |
|
946 |
|
947 |
# --- Onglet 4 : Résumé du Persona ---
|
948 |
with gr.Tab("📄 Étape 4: Résumé du Persona", id=3):
|
949 |
gr.Markdown("### 4. Visualisez le persona complet")
|
950 |
summary_button = gr.Button("Générer le Résumé du Persona")
|
951 |
-
#
|
952 |
summary_content = gr.Markdown(elem_classes="persona-summary", value="Cliquez sur 'Générer' pour voir le résumé.")
|
953 |
|
954 |
-
#
|
955 |
all_persona_inputs_for_summary = [
|
956 |
-
first_name_input, last_name_input, age_input, gender_input, persona_description_en_input,
|
957 |
skin_color_input, eye_color_input, hair_style_input, hair_color_input,
|
958 |
facial_expression_input, posture_input, clothing_style_input, accessories_input,
|
959 |
marital_status_input, education_level_input, profession_input, income_input,
|
@@ -962,79 +1078,110 @@ with gr.Blocks(theme=gr.themes.Glass()) as demo:
|
|
962 |
product_related_activities_input, pain_points_input, product_goals_input, usage_scenarios_input,
|
963 |
brand_relationship_input, market_segment_input, commercial_objectives_input,
|
964 |
visual_codes_input, special_considerations_input, daily_life_input, references_input,
|
965 |
-
#
|
966 |
-
persona_image_pil_state, #
|
967 |
session_log_state
|
968 |
]
|
969 |
|
970 |
summary_button.click(
|
971 |
fn=generate_summary_v2,
|
972 |
inputs=all_persona_inputs_for_summary,
|
973 |
-
outputs=[summary_content, session_log_state] #
|
974 |
)
|
975 |
|
976 |
# --- Onglet 5 : Journal de Bord ---
|
977 |
with gr.Tab("📓 Journal de Bord", id=4):
|
978 |
gr.Markdown("### Suivi du Processus de Création")
|
979 |
-
gr.Markdown("Ce journal enregistre les étapes clés et les erreurs de votre session.")
|
980 |
log_display_final = gr.Textbox(label="Historique de la session", lines=20, interactive=False, max_lines=MAX_LOG_LINES)
|
981 |
-
|
982 |
-
|
983 |
-
# log_file_output = gr.File(label="Télécharger le Journal", file_count="single", visible=False) # Caché initialement
|
984 |
-
download_log_button = gr.DownloadButton(label="Télécharger le Journal", visible=False)
|
985 |
-
|
986 |
|
987 |
-
#
|
988 |
session_log_state.change(
|
989 |
fn=lambda log_data: log_data,
|
990 |
inputs=session_log_state,
|
991 |
-
outputs=log_display_final
|
|
|
|
|
992 |
)
|
993 |
|
994 |
-
#
|
995 |
-
|
996 |
-
|
997 |
-
|
998 |
-
|
999 |
-
|
|
|
1000 |
|
1001 |
-
#
|
|
|
1002 |
def prepare_log_for_download(log_data):
|
1003 |
if not log_data:
|
1004 |
-
return gr.update(visible=False) #
|
1005 |
|
1006 |
-
# Créer un fichier texte temporaire que Gradio peut servir
|
1007 |
-
# Utiliser delete=False car Gradio doit pouvoir lire le fichier après le retour de la fonction
|
1008 |
try:
|
|
|
1009 |
with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.txt', encoding='utf-8') as temp_file:
|
1010 |
temp_file.write(log_data)
|
1011 |
temp_filepath = temp_file.name
|
1012 |
print(f"Fichier log prêt pour téléchargement : {temp_filepath}")
|
1013 |
-
#
|
1014 |
-
#
|
1015 |
return gr.update(value=temp_filepath, visible=True)
|
1016 |
except Exception as e:
|
1017 |
print(f"Erreur création fichier log pour téléchargement: {e}")
|
1018 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1019 |
|
1020 |
export_log_button_final.click(
|
1021 |
-
|
1022 |
-
|
1023 |
-
|
1024 |
)
|
1025 |
|
1026 |
-
|
1027 |
-
#
|
1028 |
-
#
|
1029 |
-
# share=False est plus sûr par défaut, surtout avec des clés API
|
1030 |
if not openrouter_api_key:
|
1031 |
-
|
1032 |
-
|
1033 |
-
|
1034 |
-
|
1035 |
-
|
1036 |
-
|
1037 |
-
|
1038 |
-
|
1039 |
-
|
1040 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# --- Imports and Initial Config (mostly unchanged) ---
|
2 |
import gradio as gr
|
3 |
from openai import OpenAI
|
4 |
import openai # Import top-level for error types
|
|
|
28 |
OPENAI_TEXT_MODEL = "gpt-4o-mini" # ou "gpt-4o"
|
29 |
OPENAI_IMAGE_MODEL = "dall-e-3"
|
30 |
|
31 |
+
# --- Pydantic Models (Unchanged) ---
|
32 |
class BiasInfo(BaseModel):
|
33 |
bias_type: str = Field(..., description="Type de biais identifié (ex: Stéréotype de genre, Biais de confirmation)")
|
34 |
explanation: str = Field(..., description="Explication de pourquoi cela pourrait être un biais dans ce contexte.")
|
|
|
38 |
detected_biases: list[BiasInfo] = Field(default_factory=list, description="Liste des biais potentiels détectés.")
|
39 |
overall_comment: str = Field(default="", description="Commentaire général ou indication si aucun biais majeur n'est détecté.")
|
40 |
|
41 |
+
# --- Fonctions Utilitaires (Unchanged, except maybe clean_json if needed later) ---
|
42 |
+
# ... (update_log, clean_json_response, mappings remain the same) ...
|
43 |
# Dictionnaires de correspondance (Inchangés)
|
44 |
posture_mapping = {"": "","Debout": "standing up","Assis": "sitting","Allongé": "lying down","Accroupi": "crouching","En mouvement": "moving","Reposé": "resting"}
|
45 |
facial_expression_mapping = {"": "","Souriant": "smiling","Sérieux": "serious","Triste": "sad","En colère": "angry","Surpris": "surprised","Pensif": "thoughtful"}
|
|
|
74 |
if match:
|
75 |
return match.group(1)
|
76 |
# Recherche d'un objet JSON commençant par { et finissant par }
|
77 |
+
# More robust search: find the first '{' and the last '}'
|
78 |
+
start = raw_response.find('{')
|
79 |
+
end = raw_response.rfind('}')
|
80 |
+
if start != -1 and end != -1 and end > start:
|
81 |
+
potential_json = raw_response[start:end+1]
|
82 |
try:
|
83 |
+
json.loads(potential_json)
|
84 |
+
return potential_json
|
85 |
except json.JSONDecodeError:
|
86 |
+
# Attempt to fix common issues like trailing commas (simple case)
|
87 |
+
cleaned = re.sub(r",\s*([}\]])", r"\1", potential_json)
|
88 |
+
try:
|
89 |
+
json.loads(cleaned)
|
90 |
+
return cleaned
|
91 |
+
except json.JSONDecodeError:
|
92 |
+
pass # Give up on this match
|
93 |
|
94 |
+
# If nothing works, return the raw response hoping it's already JSON or error handled elsewhere
|
95 |
return raw_response.strip()
|
96 |
|
97 |
+
|
98 |
+
# --- Holder for Active API Clients (Keep outside gr.Blocks) ---
|
99 |
+
# This avoids storing complex objects in gr.State, which causes the TypeError
|
100 |
+
active_api_client_holder = {
|
101 |
+
"client": None,
|
102 |
+
"openai_key": None # Store the validated key here temporarily if needed
|
103 |
+
}
|
104 |
+
|
105 |
# --- Fonctions Principales de l'Application (Mises à jour) ---
|
106 |
|
107 |
+
def get_active_client(app_config):
|
108 |
+
"""Retrieves the globally stored client based on app_config."""
|
109 |
+
api_source = app_config.get("api_source")
|
110 |
+
if not api_source:
|
111 |
+
return None, "API source not configured."
|
112 |
+
|
113 |
+
client = active_api_client_holder.get("client")
|
114 |
+
|
115 |
+
# Check if the stored client matches the configured source
|
116 |
+
if client:
|
117 |
+
if api_source == "openai" and not isinstance(client, OpenAI):
|
118 |
+
# Check if it's the OpenRouter client mistakenly stored
|
119 |
+
# This check might need refinement based on how you differentiate clients
|
120 |
+
pass # Assume it's correct for now, relies on configure_api_clients logic
|
121 |
+
elif api_source == "openrouter" and not client.base_url.startswith("https://openrouter.ai"):
|
122 |
+
pass # Assume it's correct
|
123 |
+
|
124 |
+
if not client:
|
125 |
+
# Attempt to re-initialize if missing (e.g., after script reload)
|
126 |
+
print("WARN: Active client not found in holder, attempting re-initialization based on config.")
|
127 |
+
if api_source == "openai" and active_api_client_holder.get("openai_key"):
|
128 |
+
try:
|
129 |
+
client = OpenAI(api_key=active_api_client_holder["openai_key"])
|
130 |
+
active_api_client_holder["client"] = client
|
131 |
+
print("Re-initialized OpenAI client.")
|
132 |
+
except Exception as e:
|
133 |
+
return None, f"Failed to re-initialize OpenAI client: {e}"
|
134 |
+
elif api_source == "openrouter" and openrouter_api_key:
|
135 |
+
try:
|
136 |
+
client = OpenAI(
|
137 |
+
base_url="https://openrouter.ai/api/v1",
|
138 |
+
api_key=openrouter_api_key,
|
139 |
+
)
|
140 |
+
active_api_client_holder["client"] = client
|
141 |
+
print("Re-initialized OpenRouter client.")
|
142 |
+
except Exception as e:
|
143 |
+
return None, f"Failed to re-initialize OpenRouter client: {e}"
|
144 |
+
else:
|
145 |
+
return None, f"Cannot re-initialize client for source '{api_source}'. Missing key or config."
|
146 |
+
|
147 |
+
|
148 |
+
if not client:
|
149 |
+
return None, f"API client for '{api_source}' is not available or failed to initialize."
|
150 |
+
|
151 |
+
return client, None # Return client and no error message
|
152 |
+
|
153 |
def analyze_biases_v2(app_config, objective_text, session_log_state):
|
154 |
"""Analyse les biais dans l'objectif marketing (utilise le client API actif)."""
|
155 |
log = session_log_state
|
156 |
log = update_log(f"Analyse Biais Objectif (début): '{objective_text[:50]}...'", log)
|
157 |
|
158 |
if not objective_text:
|
159 |
+
return BiasAnalysisResponse(overall_comment="Veuillez fournir un objectif marketing.").dict(), update_log("Analyse Biais: Objectif vide.", log)
|
160 |
|
161 |
+
active_client, error_msg = get_active_client(app_config)
|
162 |
+
if error_msg:
|
163 |
+
log = update_log(f"ERREUR Analyse Biais: {error_msg}", log)
|
164 |
+
return BiasAnalysisResponse(overall_comment=f"Erreur: {error_msg}").dict(), log
|
165 |
|
|
|
166 |
model_name = app_config["text_model"]
|
167 |
|
168 |
+
# --- System Prompt (Unchanged) ---
|
169 |
system_prompt = f"""
|
170 |
Tu es un expert en marketing éthique et en psychologie cognitive, spécialisé dans la création de personas.
|
171 |
Analyse l'objectif marketing suivant : "{objective_text}"
|
|
|
195 |
Réponds en français. S'il n'y a pas de biais clair, retourne une liste 'detected_biases' vide et indique-le dans 'overall_comment'.
|
196 |
"""
|
197 |
|
198 |
+
response_content_str = "" # Init for the bloc except
|
199 |
try:
|
200 |
completion = active_client.chat.completions.create(
|
201 |
model=model_name,
|
202 |
messages=[
|
203 |
+
{"role": "user", "content": system_prompt}
|
|
|
|
|
204 |
],
|
205 |
temperature=0.4,
|
206 |
max_tokens=800,
|
|
|
207 |
response_format={"type": "json_object"},
|
208 |
)
|
209 |
|
210 |
response_content_str = completion.choices[0].message.content
|
|
|
211 |
cleaned_response_str = clean_json_response(response_content_str)
|
212 |
|
213 |
+
# Try parsing the cleaned JSON response
|
214 |
+
try:
|
215 |
+
parsed_response = BiasAnalysisResponse.parse_raw(cleaned_response_str)
|
216 |
+
log = update_log(f"Analyse Biais Objectif (fin): Biais trouvés - {len(parsed_response.detected_biases)}", log)
|
217 |
+
return parsed_response.dict(), log
|
218 |
+
except Exception as parse_error:
|
219 |
+
error_msg = f"Erreur parsing JSON après nettoyage: {parse_error}. Réponse nettoyée: '{cleaned_response_str[:200]}...'"
|
220 |
+
print(error_msg)
|
221 |
+
log = update_log(f"ERREUR Analyse Biais Parsing: {parse_error}", log)
|
222 |
+
return BiasAnalysisResponse(overall_comment=f"Erreur technique lors du parsing de la réponse: {parse_error}").dict(), log
|
223 |
|
|
|
|
|
224 |
|
225 |
except openai.AuthenticationError as e:
|
226 |
+
error_msg = f"Erreur d'authentification API ({app_config.get('api_source', 'Inconnu')}). Vérifiez votre clé."
|
227 |
+
print(error_msg)
|
228 |
+
log = update_log(f"ERREUR API Auth: {error_msg}", log)
|
229 |
+
return BiasAnalysisResponse(overall_comment=error_msg).dict(), log
|
230 |
except openai.RateLimitError as e:
|
231 |
+
error_msg = f"Erreur API ({app_config.get('api_source', 'Inconnu')}): Limite de taux atteinte. Réessayez plus tard."
|
232 |
+
print(error_msg)
|
233 |
+
log = update_log(f"ERREUR API RateLimit: {error_msg}", log)
|
234 |
+
return BiasAnalysisResponse(overall_comment=error_msg).dict(), log
|
235 |
except Exception as e:
|
236 |
error_msg = f"Erreur pendant l'analyse des biais: {str(e)}. Réponse brute: '{response_content_str[:200]}...'"
|
237 |
print(error_msg)
|
238 |
+
log = update_log(f"ERREUR Analyse Biais API Call: {str(e)}", log)
|
239 |
+
# Try to return a compatible error structure
|
240 |
return BiasAnalysisResponse(overall_comment=f"Erreur technique lors de l'analyse: {str(e)}").dict(), log
|
241 |
|
242 |
+
# --- display_bias_analysis_v2 (Unchanged) ---
|
243 |
def display_bias_analysis_v2(analysis_result):
|
244 |
"""Formate l'analyse des biais pour l'affichage avec HighlightedText."""
|
245 |
# Prend directement le dict retourné par analyze_biases_v2
|
246 |
if not analysis_result:
|
247 |
+
return [("Aucune analyse effectuée.", None)] # Retourne format HighlightedText
|
248 |
|
249 |
biases = analysis_result.get("detected_biases", [])
|
250 |
overall_comment = analysis_result.get("overall_comment", "")
|
251 |
|
252 |
highlighted_data = []
|
253 |
if "Erreur" in overall_comment:
|
254 |
+
highlighted_data.append((overall_comment, "ERROR")) # Étiquette spécifique pour erreurs
|
255 |
elif not biases:
|
256 |
highlighted_data.append((overall_comment or "Aucun biais majeur détecté.", "INFO"))
|
257 |
else:
|
|
|
262 |
highlighted_data.append((f"{bias_info.get('explanation', 'Pas d’explication.')}\n", "EXPLANATION"))
|
263 |
highlighted_data.append((f"💡 Conseil: {bias_info.get('advice', 'Pas de conseil.')}\n", "ADVICE"))
|
264 |
|
265 |
+
# Retourne les données formatées pour HighlightedText
|
|
|
266 |
return highlighted_data
|
267 |
|
268 |
+
|
269 |
def generate_persona_image_v2(app_config, *args):
|
270 |
"""Génère l'image du persona en utilisant OpenAI si activé, sinon retourne None."""
|
271 |
# Les 13 premiers args sont les inputs de l'image, le dernier est session_log_state
|
|
|
279 |
# Vérifier si la génération d'image est activée (nécessite clé OpenAI valide)
|
280 |
if not app_config.get("image_generation_enabled", False):
|
281 |
log = update_log("Génération Image: Désactivée (Clé API OpenAI non fournie/valide).", log)
|
|
|
|
|
282 |
return None, log, "Génération d'image désactivée. Veuillez fournir une clé API OpenAI valide dans l'onglet Configuration."
|
283 |
|
284 |
+
# Get the active OpenAI client (Image generation always uses OpenAI in this app)
|
285 |
+
# We assume configure_api_clients stored the *correct* client if image_gen is enabled
|
286 |
+
openai_client, error_msg = get_active_client(app_config)
|
287 |
+
if error_msg or app_config.get("api_source") != "openai":
|
288 |
+
final_error = f"Erreur interne ou mauvaise config pour Génération Image: {error_msg or 'Client non OpenAI actif'}"
|
289 |
+
log = update_log(f"ERREUR Génération Image: {final_error}", log)
|
290 |
+
return None, log, final_error
|
291 |
+
|
292 |
# Vérifier les champs obligatoires
|
293 |
if not first_name or not last_name or not age or not gender:
|
|
|
|
|
294 |
return None, log, "Veuillez remplir Prénom, Nom, Âge et Genre pour générer l'image."
|
295 |
|
296 |
+
# --- Build Prompt (Unchanged) ---
|
|
|
297 |
prompt_parts = [f"one person only, close-up portrait photo of {first_name} {last_name}, a {gender} aged {age}."] # Préciser "photo", "portrait"
|
|
|
|
|
298 |
if skin_color_mapping.get(skin_color): prompt_parts.append(f"Skin tone: {skin_color_mapping[skin_color]}.")
|
299 |
if eye_color_mapping.get(eye_color): prompt_parts.append(f"Eye color: {eye_color_mapping[eye_color]}.")
|
300 |
if hair_style_mapping.get(hair_style): prompt_parts.append(f"Hairstyle: {hair_style_mapping[hair_style]}.")
|
|
|
304 |
if clothing_style_mapping.get(clothing_style): prompt_parts.append(f"Clothing style: {clothing_style_mapping[clothing_style]}.")
|
305 |
if accessories_mapping.get(accessories): prompt_parts.append(f"Wearing: {accessories_mapping[accessories]}.") # "Wearing" est souvent mieux pour les accessoires
|
306 |
if persona_description_en: prompt_parts.append(f"Background or context: {persona_description_en}.")
|
|
|
|
|
307 |
prompt_parts.append("Realistic photo style, high detail, natural lighting.")
|
308 |
final_prompt = " ".join(prompt_parts)
|
309 |
|
310 |
log = update_log(f"Génération Image (début): Prompt='{final_prompt[:100]}...'", log)
|
311 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
312 |
try:
|
313 |
response = openai_client.images.generate(
|
314 |
model=OPENAI_IMAGE_MODEL,
|
|
|
320 |
style="natural" # ou "vivid"
|
321 |
)
|
322 |
|
|
|
323 |
image_url = response.data[0].url
|
|
|
|
|
|
|
|
|
|
|
|
|
324 |
img_response = requests.get(image_url)
|
325 |
img_response.raise_for_status() # Vérifie les erreurs HTTP
|
|
|
|
|
326 |
pil_image = Image.open(io.BytesIO(img_response.content))
|
327 |
|
328 |
log = update_log("Génération Image (fin): Succès.", log)
|
329 |
+
return pil_image, log, None # Image, Log, No error message
|
|
|
330 |
|
331 |
+
# --- Error Handling (Unchanged) ---
|
332 |
except openai.AuthenticationError as e:
|
333 |
+
error_msg = f"Erreur d'authentification API OpenAI. Vérifiez votre clé."
|
334 |
+
print(error_msg)
|
335 |
+
log = update_log(f"ERREUR API Auth (Image): {error_msg}", log)
|
336 |
+
return None, log, error_msg # Retourne None pour l'image, log, et message d'erreur
|
337 |
except openai.RateLimitError as e:
|
338 |
+
error_msg = f"Erreur API OpenAI (Image): Limite de taux atteinte. Réessayez plus tard."
|
339 |
+
print(error_msg)
|
340 |
+
log = update_log(f"ERREUR API RateLimit (Image): {error_msg}", log)
|
341 |
+
return None, log, error_msg
|
342 |
except openai.BadRequestError as e: # Erreur fréquente si le prompt est refusé
|
343 |
+
error_msg = f"Erreur API OpenAI (Image): Requête invalide (prompt refusé ?). Détails: {e}"
|
344 |
+
print(error_msg)
|
345 |
+
log = update_log(f"ERREUR API BadRequest (Image): {error_msg}", log)
|
346 |
+
return None, log, error_msg
|
347 |
except Exception as e:
|
348 |
error_msg = f"Erreur lors de la génération de l'image: {str(e)}"
|
349 |
print(error_msg)
|
350 |
log = update_log(f"ERREUR Génération Image: {str(e)}", log)
|
351 |
return None, log, error_msg
|
352 |
|
353 |
+
|
354 |
+
def refine_persona_details_v2(app_config, first_name, last_name, age, field_name, field_value, bias_analysis_dict, marketing_objectives, session_log_state):
|
355 |
"""Affine les détails du persona (utilise le client API actif)."""
|
356 |
+
# Note: bias_analysis_json_str is now bias_analysis_dict
|
357 |
log = session_log_state
|
358 |
log = update_log(f"Refinement (début): Champ='{field_name}', Valeur initiale='{field_value[:50]}...'", log)
|
359 |
|
360 |
+
# Get active client
|
361 |
+
active_client, error_msg = get_active_client(app_config)
|
362 |
+
if error_msg:
|
363 |
+
log = update_log(f"ERREUR Refinement: {error_msg}", log)
|
364 |
+
return log, f"ERREUR: {error_msg}" # Return log and error message
|
|
|
365 |
|
|
|
366 |
model_name = app_config["text_model"]
|
367 |
|
368 |
+
# Process bias analysis results (now expects a dict)
|
369 |
biases_text = "Aucune analyse de biais précédente disponible ou chargée."
|
370 |
+
if bias_analysis_dict:
|
371 |
try:
|
372 |
+
detected_biases = bias_analysis_dict.get("detected_biases", [])
|
|
|
373 |
if detected_biases:
|
374 |
biases_text = "\n".join([f"- {b.get('bias_type','N/A')}: {b.get('explanation','N/A')}" for b in detected_biases])
|
375 |
else:
|
376 |
+
biases_text = bias_analysis_dict.get("overall_comment", "Aucun biais majeur détecté lors de l'analyse initiale.") # Use overall comment if no biases
|
377 |
except Exception as e:
|
378 |
+
biases_text = f"Erreur lors de la lecture des biais analysés (dict): {e}"
|
379 |
+
log = update_log(f"ERREUR Lecture Biais Dict pour Refinement: {e}", log)
|
380 |
+
|
381 |
|
382 |
+
# --- System Prompt (Unchanged, uses biases_text) ---
|
383 |
system_prompt = f"""
|
384 |
Tu es un assistant IA expert en marketing éthique, aidant à affiner le persona marketing pour '{first_name} {last_name}' ({age} ans).
|
385 |
L'objectif marketing initial était : "{marketing_objectives}"
|
386 |
+
L'analyse initiale de cet objectif a soulevé les points suivants :
|
387 |
{biases_text}
|
388 |
|
389 |
Tâche: Concentre-toi UNIQUEMENT sur le champ '{field_name}' dont la valeur actuelle est '{field_value}'.
|
|
|
397 |
Si la valeur actuelle semble bonne ou si tu manques de contexte pour faire une suggestion pertinente, indique-le simplement (ex: "La valeur actuelle semble appropriée." ou "Difficile de suggérer sans plus de contexte.").
|
398 |
Réponds en français. Ne fournis QUE les suggestions ou le commentaire d'approbation/manque de contexte. Ne répète pas la question.
|
399 |
"""
|
400 |
+
suggestions = "" # Init for the bloc except
|
401 |
try:
|
402 |
response = active_client.chat.completions.create(
|
403 |
model=model_name,
|
|
|
408 |
suggestions = response.choices[0].message.content.strip()
|
409 |
|
410 |
log = update_log(f"Refinement (fin): Champ='{field_name}'. Suggestions: '{suggestions[:50]}...'", log)
|
411 |
+
# Return updated log and suggestions (or None if error)
|
412 |
return log, suggestions
|
413 |
|
414 |
+
# --- Error Handling (Unchanged) ---
|
415 |
except openai.AuthenticationError as e:
|
416 |
+
error_msg = f"Erreur d'authentification API ({app_config.get('api_source', 'Inconnu')}) pendant raffinement. Vérifiez votre clé."
|
417 |
+
print(error_msg)
|
418 |
+
log = update_log(f"ERREUR API Auth (Refine): {error_msg}", log)
|
419 |
+
return log, f"ERREUR: {error_msg}" # Return error message for display
|
420 |
except openai.RateLimitError as e:
|
421 |
+
error_msg = f"Erreur API ({app_config.get('api_source', 'Inconnu')}) (Refine): Limite de taux atteinte."
|
422 |
+
print(error_msg)
|
423 |
+
log = update_log(f"ERREUR API RateLimit (Refine): {error_msg}", log)
|
424 |
+
return log, f"ERREUR: {error_msg}"
|
425 |
except Exception as e:
|
426 |
error_msg = f"Erreur lors du raffinement pour '{field_name}': {str(e)}"
|
427 |
print(error_msg)
|
428 |
log = update_log(f"ERREUR Refinement '{field_name}': {str(e)}", log)
|
429 |
return log, f"ERREUR: {error_msg}"
|
430 |
|
431 |
+
# --- generate_summary_v2 (Unchanged, already handles PIL image correctly) ---
|
432 |
def generate_summary_v2(*args):
|
433 |
"""Génère le résumé HTML du persona (gestion image PIL)."""
|
434 |
# Le dernier arg est session_log_state, l'avant-dernier est persona_image (PIL ou None)
|
|
|
465 |
# Convertir l'image PIL en base64 pour l'intégrer directement
|
466 |
buffered = io.BytesIO()
|
467 |
# Sauvegarder en PNG (ou JPEG si préféré) dans le buffer mémoire
|
468 |
+
# Handle potential RGBA issues for JPEG
|
469 |
+
img_to_save = persona_image_pil
|
470 |
+
if img_to_save.mode == 'RGBA' or 'transparency' in img_to_save.info:
|
471 |
+
img_to_save = img_to_save.convert('RGB') # Convert to RGB if it has alpha
|
472 |
+
|
473 |
+
img_to_save.save(buffered, format="JPEG") # Use JPEG for smaller size usually
|
474 |
img_bytes = buffered.getvalue()
|
475 |
img_base64 = base64.b64encode(img_bytes).decode()
|
476 |
+
img_data_url = f"data:image/jpeg;base64,{img_base64}"
|
477 |
image_html += f"<img src='{img_data_url}' alt='Persona {first_name}' style='max-width: 300px; height: auto; border: 1px solid #eee; border-radius: 5px; margin-top: 10px;'/>\n"
|
478 |
except Exception as e:
|
479 |
img_err_msg = f"Erreur encodage image: {e}"
|
|
|
484 |
|
485 |
# Section Informations Personnelles (Titre centré)
|
486 |
summary += f"<div style='text-align: center;'><h1>{first_name} {last_name}, {age} ans ({gender})</h1></div>\n"
|
|
|
|
|
|
|
487 |
# summary += f"<p><i>{persona_description_en}</i></p>\n" # Commenté
|
488 |
|
489 |
# Assemblage des autres sections (avec vérification si champ rempli)
|
|
|
491 |
content = ""
|
492 |
for label, value in fields.items():
|
493 |
# N'ajoute que si la valeur existe (n'est pas None, False, 0, ou chaîne vide)
|
494 |
+
# Exception for income == 0 which might be valid
|
495 |
+
should_add = False
|
496 |
+
if label == "Revenus annuels (€)":
|
497 |
+
# Add if value is not None (0 is a valid income)
|
498 |
+
should_add = value is not None
|
499 |
+
elif value: # Standard check for other fields
|
500 |
+
should_add = True
|
501 |
+
|
502 |
+
if should_add:
|
503 |
# Formatage spécial pour les revenus
|
504 |
if label == "Revenus annuels (€)" and isinstance(value, (int, float)):
|
505 |
# Format numérique avec séparateur de milliers (espace)
|
506 |
try:
|
507 |
+
# Use non-breaking space for thousands separator in HTML
|
508 |
+
value_str = f"{int(value):,} €".replace(",", " ")
|
509 |
except ValueError: # Gère le cas où income serait une chaîne ou autre chose
|
510 |
+
value_str = str(value) + " €"
|
511 |
else:
|
512 |
value_str = str(value)
|
513 |
+
# Remplace les sauts de ligne par <br> pour l'affichage HTML, escape HTML chars
|
514 |
+
value_str_html = markdown.markdown(value_str).replace('<p>', '').replace('</p>', '').strip().replace("\n", "<br>")
|
515 |
content += f"<b>{label}:</b> {value_str_html}<br>\n"
|
516 |
if content:
|
517 |
# Ajoute un peu d'espace avant la section
|
|
|
535 |
"Scénarios d’utilisation typiques": usage_scenarios
|
536 |
})
|
537 |
summary += add_section("Contexte Professionnel/Vie Quotidienne", {
|
538 |
+
"Responsabilités principales": main_responsibilities,
|
539 |
+
"Activités journalières": daily_activities,
|
540 |
+
"Une journée type / Citation": daily_life # Renommé pour correspondre au label
|
541 |
})
|
542 |
summary += add_section("Marketing & Considérations Spéciales", {
|
543 |
+
"Relation avec la marque": brand_relationship,
|
544 |
+
"Segment de marché": market_segment,
|
545 |
+
"Objectifs commerciaux (SMART)": commercial_objectives,
|
546 |
+
"Codes visuels / Marques préférées": visual_codes,
|
547 |
+
"Considérations spéciales (accessibilité, culture...)": special_considerations,
|
548 |
+
"Références / Sources de données": references
|
549 |
})
|
550 |
|
551 |
image_html += "</div>\n" # Ferme div image
|
552 |
|
553 |
+
# Assemblage final avec flexbox
|
554 |
final_html = "<div style='display: flex; flex-wrap: wrap; align-items: flex-start; font-family: sans-serif; padding: 10px;'>\n"
|
555 |
final_html += f"<div style='flex: 1; min-width: 350px; padding-right: 15px;'>\n{summary}</div>\n" # Colonne texte
|
556 |
final_html += image_html # Colonne image
|
557 |
final_html += "</div>"
|
558 |
|
559 |
+
# Return the generated HTML and updated log
|
560 |
return final_html, log
|
561 |
|
562 |
+
|
563 |
# --- Interface Gradio V2 (Mise à jour avec BYOK et suggestions) ---
|
564 |
|
565 |
with gr.Blocks(theme=gr.themes.Glass()) as demo:
|
|
|
567 |
gr.Markdown("Outil d'aide à la création de personas, intégrant l'IA générative (OpenRouter ou OpenAI) pour stimuler la créativité et la réflexivité face aux biais.")
|
568 |
|
569 |
# --- État Global Partagé ---
|
570 |
+
# Stocke la configuration active (flags, modèle, mais PAS le client objet)
|
571 |
app_config_state = gr.State(value={
|
572 |
+
# "client": None, # REMOVED - DO NOT STORE CLIENT OBJECT IN STATE
|
573 |
"api_source": None, # 'openai' or 'openrouter'
|
574 |
"text_model": None,
|
575 |
"image_generation_enabled": False,
|
576 |
+
"openai_key_provided": False, # Flag if key was entered
|
577 |
"openrouter_key_provided": bool(openrouter_api_key)
|
578 |
})
|
579 |
+
# Stocke le résultat de l'analyse de biais (dict)
|
580 |
bias_analysis_result_state = gr.State(value={})
|
581 |
+
# Stocke l'image générée (objet PIL ou None) - Keep this, gr.Image handles PIL
|
582 |
persona_image_pil_state = gr.State(value=None)
|
583 |
# Stocke le log de session (chaîne de caractères)
|
584 |
session_log_state = gr.State(value="")
|
|
|
592 |
def update_status_display(new_message, current_log):
|
593 |
# Met aussi à jour le log si un message est affiché
|
594 |
if new_message:
|
595 |
+
# Avoid logging redundant "success" messages or empty updates
|
596 |
+
if "ERREUR" in new_message or "WARN" in new_message or ("Configuration" in new_message and "active" in new_message) :
|
597 |
+
current_log = update_log(f"STATUS: {new_message}", current_log)
|
598 |
return new_message, current_log
|
599 |
|
600 |
# --- Onglets ---
|
|
|
609 |
if openrouter_api_key:
|
610 |
gr.Markdown("✅ Clé API **OpenRouter** trouvée dans l'environnement (`OPENROUTER_API_KEY`).")
|
611 |
else:
|
612 |
+
gr.Markdown("❌ **Clé API OpenRouter (`OPENROUTER_API_KEY`) non trouvée.** Le mode OpenRouter ne fonctionnera pas. Veuillez la définir dans vos variables d'environnement ou un fichier `.env` (ou utiliser une clé OpenAI).")
|
613 |
|
614 |
# Champ pour la clé OpenAI (optionnelle)
|
615 |
openai_api_key_input = gr.Textbox(
|
616 |
label="Clé API OpenAI (Optionnelle)",
|
617 |
type="password",
|
618 |
placeholder="Entrez votre clé OpenAI ici pour activer DALL-E 3 et utiliser OpenAI pour le texte",
|
619 |
+
info="Si fournie et valide, cette clé sera utilisée pour la génération d'images (DALL-E 3) ET pour l'analyse/raffinement de texte (GPT). Sinon, OpenRouter (si clé dispo) sera utilisé pour le texte et la génération d'images sera désactivée."
|
620 |
)
|
621 |
# Bouton pour appliquer la config (initialise les clients)
|
622 |
configure_api_button = gr.Button("Appliquer la Configuration API")
|
623 |
# Affichage du statut de la configuration active
|
624 |
api_status_display = gr.Markdown("Statut API: Non configuré.")
|
625 |
|
626 |
+
# Fonction de configuration des clients API (modifiée)
|
627 |
def configure_api_clients(openai_key, current_config, current_log):
|
628 |
openai_key_provided = bool(openai_key)
|
629 |
openrouter_key_available = current_config["openrouter_key_provided"]
|
630 |
status_msg = ""
|
631 |
config = current_config.copy() # Copie pour modification
|
632 |
|
633 |
+
# Clear previous client and stored key from holder
|
634 |
+
active_api_client_holder["client"] = None
|
635 |
+
active_api_client_holder["openai_key"] = None
|
636 |
+
|
637 |
api_source = None
|
638 |
text_model = None
|
639 |
image_enabled = False
|
640 |
+
client_to_store = None # The client object we will put in the global holder
|
641 |
|
642 |
# Priorité à OpenAI si clé fournie
|
643 |
if openai_key_provided:
|
644 |
try:
|
645 |
+
temp_client = OpenAI(api_key=openai_key)
|
646 |
+
# Simple test call (optional but good)
|
647 |
+
temp_client.models.list(limit=1) # Less expensive test
|
648 |
+
|
649 |
+
# If test succeeds:
|
650 |
+
client_to_store = temp_client
|
651 |
+
active_api_client_holder["openai_key"] = openai_key # Store key if needed for re-init
|
652 |
api_source = "openai"
|
653 |
text_model = OPENAI_TEXT_MODEL
|
654 |
image_enabled = True
|
655 |
status_msg = f"✅ Configuration **OpenAI** active (Modèle texte: `{text_model}`, Images: DALL-E 3 activé)."
|
656 |
config["openai_key_provided"] = True
|
657 |
+
current_log = update_log("Configuration: Client OpenAI initialisé et testé.", current_log)
|
658 |
+
|
659 |
+
except openai.AuthenticationError:
|
660 |
+
status_msg = "⚠️ Clé API OpenAI fournie mais **invalide**. Vérifiez la clé."
|
661 |
+
log_msg = f"ERREUR API Config OpenAI: Clé invalide."
|
662 |
+
current_log = update_log(log_msg, current_log)
|
663 |
+
print(log_msg)
|
664 |
+
config["openai_key_provided"] = False
|
665 |
+
openai_key_provided = False # Force fallback check
|
666 |
except Exception as e:
|
667 |
+
status_msg = f"⚠️ Clé OpenAI fournie mais erreur de connexion/test: {str(e)}. Vérifiez la clé et la connectivité."
|
668 |
log_msg = f"ERREUR API Config OpenAI: {e}"
|
669 |
current_log = update_log(log_msg, current_log)
|
670 |
print(log_msg)
|
|
|
671 |
config["openai_key_provided"] = False
|
672 |
+
openai_key_provided = False # Force fallback check
|
673 |
|
674 |
# Fallback vers OpenRouter si clé OpenAI non fournie ou invalide, ET si clé OpenRouter existe
|
675 |
+
# Use 'elif' to avoid configuring OpenRouter if OpenAI was successful
|
676 |
+
elif openrouter_key_available:
|
677 |
+
try:
|
678 |
+
temp_client = OpenAI(
|
679 |
+
base_url="https://openrouter.ai/api/v1",
|
680 |
+
api_key=openrouter_api_key,
|
681 |
+
)
|
682 |
+
# Simple test (e.g., list models - adapt if needed for OpenRouter structure)
|
683 |
+
# temp_client.models.list(limit=1) # Might need adjustment based on OpenRouter API/latency
|
684 |
+
|
685 |
+
client_to_store = temp_client
|
686 |
+
api_source = "openrouter"
|
687 |
+
text_model = OPENROUTER_TEXT_MODEL
|
688 |
+
image_enabled = False # Image désactivée avec OpenRouter
|
689 |
+
status_msg = f"✅ Configuration **OpenRouter** active (Modèle texte: `{text_model}`, Images: Désactivé)."
|
690 |
+
config["openai_key_provided"] = False # Ensure this is false
|
691 |
+
current_log = update_log("Configuration: Client OpenRouter initialisé.", current_log)
|
|
|
|
|
|
|
692 |
|
693 |
+
except Exception as e:
|
694 |
+
status_msg = f"❌ Erreur lors de l'initialisation d'OpenRouter (clé: {openrouter_api_key is not None}): {e}."
|
695 |
+
log_msg = f"ERREUR API Config OpenRouter: {e}"
|
696 |
+
current_log = update_log(log_msg, current_log)
|
697 |
+
print(log_msg)
|
698 |
+
client_to_store = None # Ensure no client is stored
|
699 |
+
api_source = None
|
700 |
+
text_model = None
|
701 |
+
image_enabled = False
|
702 |
+
config["openai_key_provided"] = False
|
703 |
+
|
704 |
+
else: # No valid OpenAI key provided AND no OpenRouter key available
|
705 |
+
if not openai_key_provided and not openrouter_key_available:
|
706 |
+
status_msg = "❌ Aucune clé API valide (ni OpenAI ni OpenRouter) n'est disponible/configurée. L'application ne peut pas fonctionner."
|
707 |
+
elif not openrouter_key_available: # OpenAI key was provided but failed, and no OpenRouter fallback
|
708 |
+
status_msg += " Et aucune clé OpenRouter n'est disponible comme alternative." # Append to previous OpenAI error msg
|
709 |
+
else: # Should not happen given the logic, but as a safeguard
|
710 |
+
status_msg = "❌ Impossible de configurer un client API."
|
711 |
+
|
712 |
+
client_to_store = None # Ensure no client is stored
|
713 |
+
api_source = None
|
714 |
+
text_model = None
|
715 |
+
image_enabled = False
|
716 |
+
config["openai_key_provided"] = False
|
717 |
+
|
718 |
+
|
719 |
+
# Store the successfully created client (or None) in the global holder
|
720 |
+
active_api_client_holder["client"] = client_to_store
|
721 |
+
|
722 |
+
# Mettre à jour l'état global (config flags only)
|
723 |
+
# config["client"] = client_to_store # DO NOT STORE CLIENT IN STATE
|
724 |
config["api_source"] = api_source
|
725 |
config["text_model"] = text_model
|
726 |
config["image_generation_enabled"] = image_enabled
|
727 |
|
728 |
+
log_msg = f"Configuration API appliquée. Source Active: {api_source or 'Aucune'}, Images: {'Actif' if image_enabled else 'Inactif'}."
|
729 |
+
# Avoid double logging if already logged above
|
730 |
+
if "Configuration:" not in log_msg:
|
731 |
+
current_log = update_log(log_msg, current_log)
|
732 |
|
733 |
+
# Return the new config state dict, the status message, and the updated log
|
734 |
+
# The interactivity update is handled by the .change() on app_config_state
|
735 |
+
return config, status_msg, current_log
|
736 |
|
|
|
|
|
|
|
|
|
|
|
|
|
737 |
|
738 |
+
# Link the configuration button
|
739 |
+
# Outputs: update app_config_state, api_status_display markdown, session_log_state
|
740 |
configure_api_button.click(
|
741 |
fn=configure_api_clients,
|
742 |
inputs=[openai_api_key_input, app_config_state, session_log_state],
|
743 |
outputs=[app_config_state, api_status_display, session_log_state]
|
|
|
744 |
)
|
745 |
|
746 |
# --- Onglet 1 : Objectif & Analyse Biais ---
|
|
|
781 |
analyze_button.click(
|
782 |
fn=analyze_biases_v2,
|
783 |
inputs=[app_config_state, objective_input, session_log_state],
|
784 |
+
outputs=[bias_analysis_result_state, session_log_state] # Stores the result dict + updates log
|
785 |
).then(
|
786 |
fn=display_bias_analysis_v2,
|
787 |
+
inputs=bias_analysis_result_state, # Uses the stored result (dict)
|
788 |
+
outputs=bias_analysis_output_highlighted # Displays formatted output
|
789 |
).then(
|
790 |
+
# Updates the global status display only if the analysis returned an error message
|
791 |
+
fn=lambda result, log: update_status_display(result.get("overall_comment", "") if "Erreur" in result.get("overall_comment", "") else "", log),
|
792 |
+
inputs=[bias_analysis_result_state, session_log_state],
|
793 |
+
outputs=[status_display, session_log_state]
|
794 |
)
|
795 |
|
796 |
# Action du bouton Enregistrer Réflexion
|
797 |
def log_user_reflection(reflection_text, log_state):
    """Record the user's step-1 reflection in the session log.

    Empty input is ignored; in that case the log state is returned unchanged.
    """
    if not reflection_text:
        # Nothing typed — hand the log back untouched.
        return log_state
    return update_log(f"Réflexion Utilisateur (Étape 1): '{reflection_text}'", log_state)
|
802 |
log_reflection_button.click(
|
803 |
fn=log_user_reflection,
|
804 |
inputs=[user_reflection_on_biases, session_log_state],
|
|
|
810 |
with gr.Tab("👤 Étape 2: Image & Infos Base", id=1):
|
811 |
gr.Markdown("### 2. Créez l'identité visuelle et les informations de base")
|
812 |
with gr.Row():
|
813 |
+
# FIX: Changed scale to integer
|
814 |
+
with gr.Column(scale=1): # Colonne de gauche pour les inputs (adjust scale integer as needed)
|
815 |
first_name_input = gr.Textbox(label="Prénom")
|
816 |
last_name_input = gr.Textbox(label="Nom")
|
817 |
age_input = gr.Slider(label="Âge", minimum=18, maximum=100, step=1, value=30)
|
|
|
834 |
reset_visuals_button = gr.Button("Réinitialiser Détails Visuels", size="sm")
|
835 |
|
836 |
with gr.Column(scale=1): # Colonne de droite pour l'image et le bouton
|
837 |
+
# Use type="pil" to handle image in memory
|
838 |
persona_image_output = gr.Image(label="Image du Persona", type="pil", height=400, interactive=False) # Non éditable par l'utilisateur
|
839 |
+
# This button's interactivity is controlled by app_config_state.change()
|
840 |
+
generate_image_button = gr.Button("🖼️ Générer / Mettre à jour l'Image", interactive=False) # Start disabled
|
841 |
+
gr.Markdown("<small>💡 **Attention :** Les IA génératrices d'images peuvent reproduire des stéréotypes. Utilisez les détails visuels avec discernement. La génération d'image nécessite une clé API OpenAI valide (voir onglet Configuration).</small>", elem_classes="warning")
|
842 |
|
843 |
# Logique de l'onglet 2
|
844 |
visual_inputs = [
|
|
|
848 |
reset_visuals_button.click(lambda: [""] * len(visual_inputs), outputs=visual_inputs)
|
849 |
|
850 |
# Action du bouton Générer Image
|
851 |
+
# Defines an intermediate function to handle multiple outputs and messages
|
852 |
def handle_image_generation(*args):
    """Gradio handler for the image-generation button.

    Expects ``args`` laid out as (app_config, *persona_fields, log_state).
    Runs the persona image generation, then routes any error either to a
    transient gr.Info popup (user-guidance messages) or to the returned
    status string (API / internal failures).

    Returns (pil_image_or_None, updated_log, status_message_string).
    """
    config, log_state = args[0], args[-1]
    persona_fields = args[1:-1]  # first_name, last_name, etc.

    # Delegate to the generator (which resolves the active API client itself).
    pil_image, updated_log, error_message = generate_persona_image_v2(
        config, *persona_fields, log_state
    )

    status_msg = ""
    if error_message:
        is_user_guidance = (
            "Veuillez remplir" in error_message
            or "Génération d'image désactivée" in error_message
        )
        if is_user_guidance:
            # Guidance for the user: popup only, global status untouched.
            gr.Info(error_message)
        else:
            # API/internal error: surface via the global status display.
            status_msg = error_message

    return pil_image, updated_log, status_msg
|
878 |
|
879 |
+
# Connect the button click
|
880 |
# Image-generation pipeline: the click handler writes its three results into
# states, then two chained steps copy those states into the visible components.
generate_image_button.click(
    fn=handle_image_generation,
    inputs=[app_config_state] + [ # Pass config state first
        first_name_input, last_name_input, age_input, gender_input, persona_description_en_input,
        skin_color_input, eye_color_input, hair_style_input, hair_color_input,
        facial_expression_input, posture_input, clothing_style_input, accessories_input,
        session_log_state # Pass log state last
    ],
    outputs=[
        persona_image_pil_state, # Update the PIL image state
        session_log_state, # Update the log state
        status_message_state # Update the status message state
    ]
).then( # Chain to update the image display from the state
    fn=lambda img_state: img_state,
    inputs=persona_image_pil_state,
    outputs=persona_image_output
).then( # Chain to update the global status display from the state
    fn=update_status_display, # Use the existing update function
    inputs=[status_message_state, session_log_state],
    outputs=[status_display, session_log_state]
)

# Update button interactivity when API config changes: the image button is
# only clickable while the active configuration enables image generation.
app_config_state.change(
    fn=lambda config: gr.update(interactive=config.get("image_generation_enabled", False)),
    inputs=app_config_state,
    outputs=generate_image_button
)
|
909 |
|
910 |
+
|
911 |
# --- Onglet 3 : Profil Détaillé & Raffinement IA ---
|
912 |
with gr.Tab("📝 Étape 3: Profil Détaillé & Raffinement IA", id=2):
|
913 |
gr.Markdown("### 3. Complétez les détails du persona")
|
914 |
gr.Markdown("Remplissez les champs suivants. Utilisez le bouton '💡 Affiner' pour obtenir des suggestions de l'IA visant à améliorer le champ spécifique, en tenant compte de votre objectif initial et des biais potentiels identifiés.")
|
915 |
|
916 |
+
# Organize into sections
|
917 |
with gr.Row():
|
918 |
with gr.Column():
|
919 |
gr.Markdown("#### Infos Socio-Démographiques")
|
920 |
marital_status_input = gr.Dropdown(label="État civil", choices=["", "Célibataire", "En couple", "Marié(e)", "Divorcé(e)", "Veuf(ve)"])
|
921 |
education_level_input = gr.Dropdown(label="Niveau d'éducation", choices=["", "Études secondaires", "Baccalauréat", "Licence", "Master", "Doctorat", "Autre"])
|
922 |
profession_input = gr.Textbox(label="Profession")
|
923 |
+
income_input = gr.Number(label="Revenus annuels (€)", minimum=0, step=1000) # Allow 0
|
924 |
|
925 |
gr.Markdown("#### Psychographie")
|
926 |
with gr.Row(equal_height=False):
|
|
|
954 |
usage_scenarios_input = gr.Textbox(label="Scénarios d'utilisation typiques", lines=2, scale=4)
|
955 |
refine_usage_scenarios_button = gr.Button("💡 Affiner", scale=1, size='sm')
|
956 |
|
957 |
+
# Other optional fields
|
958 |
with gr.Accordion("Autres Informations (Optionnel)", open=False):
|
959 |
with gr.Row():
|
960 |
with gr.Column():
|
|
|
991 |
refine_references_button = gr.Button("💡 Affiner", scale=1, size='sm')
|
992 |
|
993 |
|
994 |
+
# Intermediate function to handle refinement requests and display results
|
995 |
+
def handle_refinement_request(app_config, fname, lname, age_val, field_name_display, field_val, bias_state_dict, objectives, log_state):
    """Run AI refinement for one persona field and surface the outcome.

    Successful suggestions are shown in a gr.Info popup; failures raise a
    gr.Warning popup and are also propagated through the returned status
    string so the global status display can pick them up.

    Returns (updated_log, status_message) — status_message is "" on success.
    """
    updated_log, result = refine_persona_details_v2(
        app_config, fname, lname, age_val, field_name_display,
        field_val, bias_state_dict, objectives, log_state
    )

    status_msg = ""
    if not result:
        # No suggestion at all (None or empty string) — warn the user.
        gr.Warning(f"Pas de suggestion reçue pour '{field_name_display}'.")
    elif "ERREUR:" in result:
        # API/internal failure: warn and push the text to the global status.
        status_msg = result
        gr.Warning(f"Erreur lors du raffinement pour '{field_name_display}'. Vérifiez le log et le statut.")
    else:
        # Success path: popup only; the global status stays untouched.
        gr.Info(f"Suggestions pour '{field_name_display}':\n{result}")

    return updated_log, status_msg
|
1015 |
|
1016 |
+
# Generic lambda function to call the refinement handler
|
1017 |
def create_refine_handler(field_name_display, input_component):
    """Build a click handler bound to one persona field.

    ``field_name_display`` is captured by closure and injected into the
    refinement call; ``input_component`` is accepted for symmetry with the
    button map but is not used here (the field value arrives as an event
    input at call time).
    """
    def _handler(app_conf, fname, lname, age_val, field_val, bias_state, objectives, log_state):
        return handle_refinement_request(
            app_conf, fname, lname, age_val, field_name_display,
            field_val, bias_state, objectives, log_state
        )
    return _handler
|
1021 |
|
1022 |
+
# Link each "Refine" button
|
1023 |
common_inputs_refine = [app_config_state, first_name_input, last_name_input, age_input]
|
1024 |
+
# Pass the bias_analysis_result_state (which holds the dict)
|
1025 |
state_inputs_refine = [bias_analysis_result_state, objective_input, session_log_state]
|
1026 |
+
# Outputs update the log and potentially the global status message state
|
1027 |
common_outputs_refine = [session_log_state, status_message_state]
|
1028 |
|
1029 |
+
# Map buttons to their corresponding input component and label
|
1030 |
refine_buttons_map = {
|
1031 |
refine_personality_traits_button: ("Traits de personnalité", personality_traits_input),
|
1032 |
refine_values_beliefs_button: ("Valeurs et croyances", values_beliefs_input),
|
|
|
1053 |
fn=create_refine_handler(label, input_comp),
|
1054 |
inputs=common_inputs_refine + [input_comp] + state_inputs_refine,
|
1055 |
outputs=common_outputs_refine
|
1056 |
+
).then( # Chain to update the global status display from the state
|
1057 |
+
fn=update_status_display,
|
1058 |
inputs=[status_message_state, session_log_state],
|
1059 |
outputs=[status_display, session_log_state]
|
1060 |
+
)
|
1061 |
|
1062 |
|
1063 |
# --- Onglet 4 : Résumé du Persona ---
|
1064 |
with gr.Tab("📄 Étape 4: Résumé du Persona", id=3):
|
1065 |
gr.Markdown("### 4. Visualisez le persona complet")
|
1066 |
summary_button = gr.Button("Générer le Résumé du Persona")
|
1067 |
+
# Use Markdown to display the HTML summary
|
1068 |
summary_content = gr.Markdown(elem_classes="persona-summary", value="Cliquez sur 'Générer' pour voir le résumé.")
|
1069 |
|
1070 |
+
# Collect all inputs for the summary IN THE CORRECT ORDER for generate_summary_v2
|
1071 |
all_persona_inputs_for_summary = [
|
1072 |
+
first_name_input, last_name_input, age_input, gender_input, persona_description_en_input,
|
1073 |
skin_color_input, eye_color_input, hair_style_input, hair_color_input,
|
1074 |
facial_expression_input, posture_input, clothing_style_input, accessories_input,
|
1075 |
marital_status_input, education_level_input, profession_input, income_input,
|
|
|
1078 |
product_related_activities_input, pain_points_input, product_goals_input, usage_scenarios_input,
|
1079 |
brand_relationship_input, market_segment_input, commercial_objectives_input,
|
1080 |
visual_codes_input, special_considerations_input, daily_life_input, references_input,
|
1081 |
+
# Add necessary states last
|
1082 |
+
persona_image_pil_state, # Pass the state containing the PIL image
|
1083 |
session_log_state
|
1084 |
]
|
1085 |
|
1086 |
summary_button.click(
|
1087 |
fn=generate_summary_v2,
|
1088 |
inputs=all_persona_inputs_for_summary,
|
1089 |
+
outputs=[summary_content, session_log_state] # Updates content and log
|
1090 |
)
|
1091 |
|
1092 |
# --- Onglet 5 : Journal de Bord ---
|
1093 |
with gr.Tab("📓 Journal de Bord", id=4):
|
1094 |
gr.Markdown("### Suivi du Processus de Création")
|
1095 |
+
gr.Markdown("Ce journal enregistre les étapes clés, les réflexions et les erreurs de votre session.")
|
1096 |
log_display_final = gr.Textbox(label="Historique de la session", lines=20, interactive=False, max_lines=MAX_LOG_LINES)
|
1097 |
+
# Use gr.DownloadButton for better UX
|
1098 |
+
download_log_button = gr.DownloadButton(label="Télécharger le Journal", visible=False) # Hidden initially
|
|
|
|
|
|
|
1099 |
|
1100 |
+
# Update log display when state changes
|
1101 |
session_log_state.change(
|
1102 |
fn=lambda log_data: log_data,
|
1103 |
inputs=session_log_state,
|
1104 |
+
outputs=log_display_final,
|
1105 |
+
# Add queue=False to make UI update instantly for log
|
1106 |
+
# queue=False # Might cause issues if log updates very rapidly? Test.
|
1107 |
)
|
1108 |
|
1109 |
+
# Update global status display when its state changes
|
1110 |
+
# This might be redundant if status updates always accompany log updates, but safe to keep
|
1111 |
+
# status_message_state.change(
|
1112 |
+
# fn=update_status_display,
|
1113 |
+
# inputs=[status_message_state, session_log_state],
|
1114 |
+
# outputs=[status_display, session_log_state]
|
1115 |
+
# )
|
1116 |
|
1117 |
+
# Function to prepare the log file for the DownloadButton
|
1118 |
+
# Keep this outside the click if possible, or ensure it's fast
|
1119 |
def prepare_log_for_download(log_data):
    """Dump the session log to a temporary .txt file for the DownloadButton.

    Returns a gr.update: on success the button becomes visible and points at
    the temp file; with no log data (or on any failure) it stays hidden.
    """
    if not log_data:
        # Nothing to export — keep the download button hidden.
        return gr.update(visible=False)
    try:
        # A named temp file gives Gradio a real path it can serve;
        # Gradio is expected to clean the file up afterwards.
        tmp = tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.txt', encoding='utf-8')
        try:
            tmp.write(log_data)
        finally:
            tmp.close()
        print(f"Fichier log prêt pour téléchargement : {tmp.name}")
        return gr.update(value=tmp.name, visible=True)
    except Exception as e:
        print(f"Erreur création fichier log pour téléchargement: {e}")
        # NOTE(review): errors are only printed, not pushed to the status
        # display — updating state from here would be fragile.
        return gr.update(visible=False)
|
1137 |
+
|
1138 |
+
|
1139 |
+
# Instead of a separate export button, trigger preparation when log changes? Or use DownloadButton directly?
|
1140 |
+
# Let's use the DownloadButton's direct file generation capability if possible.
|
1141 |
+
|
1142 |
+
# Simpler approach: Directly provide the generating function to DownloadButton
|
1143 |
+
def generate_log_content(log_data):
    """Package the session log as an in-memory file object.

    Args:
        log_data: The session log text; falsy input yields ``None``.

    Returns:
        An ``io.BytesIO`` containing the UTF-8 encoded log, with a
        timestamped ``name`` attribute suggesting a download filename,
        or ``None`` when there is no log content.

    NOTE(review): currently unused — the app exports the log through the
    temp-file path instead, since gr.DownloadButton may not accept a
    BytesIO value directly.
    """
    if not log_data:
        return None
    filename = f"personagenai_log_{datetime.datetime.now().strftime('%Y%m%d_%H%M%S')}.txt"
    buffer = io.BytesIO(log_data.encode('utf-8'))
    buffer.name = filename  # Suggest a filename for the download
    # FIX: the original built the buffer but never returned it, so the
    # function always fell through to None even for non-empty input.
    return buffer
|
1154 |
+
|
1155 |
+
# Re-add the export button to trigger the file prep
|
1156 |
+
export_log_button_final = gr.Button("Préparer l'Export du Journal")
|
1157 |
|
1158 |
export_log_button_final.click(
|
1159 |
+
fn=prepare_log_for_download,
|
1160 |
+
inputs=session_log_state,
|
1161 |
+
outputs=download_log_button # Update the DownloadButton (makes visible, sets path)
|
1162 |
)
|
1163 |
|
1164 |
+
|
1165 |
+
# --- Launch App ---
# Top-level startup sequence: warn if no OpenRouter key, try an initial API
# configuration, seed the component default values, then launch the app.
# Initial check for OpenRouter key
if not openrouter_api_key:
    print("\n" + "="*60)
    print("AVERTISSEMENT : Clé API OpenRouter (`OPENROUTER_API_KEY`) non trouvée.")
    print("Le fonctionnement dépendra de la fourniture d'une clé OpenAI valide.")
    print("="*60 + "\n")
    # Initialize app_config_state accordingly in the Gradio definition?
    # The initial state already reflects this with "openrouter_key_provided": False

# Set initial API status message based on initial config possibilities
initial_api_status = "Statut API: Non configuré."
if openrouter_api_key:
    # Attempt to configure OpenRouter by default if key exists
    # (passing None as the OpenAI key selects the OpenRouter path).
    print("Clé OpenRouter trouvée, tentative de configuration initiale...")
    initial_config, initial_api_status, initial_log = configure_api_clients(None, app_config_state.value, "")
    # Mutating .value here sets the components' defaults before launch;
    # NOTE(review): this assumes these lines run after the Blocks definition
    # but before launch() — confirm ordering if the file is reorganized.
    app_config_state.value = initial_config # Update initial state value
    session_log_state.value = initial_log
    print(initial_api_status) # Print status to console
    # We need to update the Markdown display default value too
    api_status_display.value = initial_api_status # Set initial value for the Markdown component

demo.queue().launch(debug=False, share=False) # debug=True helpful for development
|