Update app.py
app.py
CHANGED
@@ -4,29 +4,11 @@ from langdetect import detect
 from huggingface_hub import InferenceClient
 import pandas as pd
 import os
-import nltk
 import asyncio
-nltk.download('punkt')  # CORRECT : 'punkt' !
-
-from nltk.tokenize import sent_tokenize

 HF_TOKEN = os.getenv("HF_TOKEN")

-#
-translator_to_en = pipeline("translation", model="Helsinki-NLP/opus-mt-mul-en")
-translator_to_fr = pipeline("translation", model="Helsinki-NLP/opus-mt-en-fr")
-classifier = pipeline("sentiment-analysis", model="mrm8488/distilroberta-finetuned-financial-news-sentiment-analysis")
-
-# Fonction traduction segmentée
-def safe_translate_to_fr(text, max_length=512):
-    sentences = sent_tokenize(text)
-    translated_sentences = []
-    for sentence in sentences:
-        translated = translator_to_fr(sentence, max_length=max_length)[0]['translation_text']
-        translated_sentences.append(translated)
-    return " ".join(translated_sentences)
-
-# Appel API Zephyr
+# Fonction pour appeler l'API Zephyr avec des paramètres ajustés
 async def call_zephyr_api(prompt, mode, hf_token=HF_TOKEN):
     client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token=hf_token)
     try:
@@ -36,15 +18,25 @@ async def call_zephyr_api(prompt, mode, hf_token=HF_TOKEN):
         elif mode == "Équilibré":
             max_new_tokens = 100
             temperature = 0.5
-        else:
+        else:  # Précis
             max_new_tokens = 150
             temperature = 0.7
         response = await asyncio.to_thread(client.text_generation, prompt, max_new_tokens=max_new_tokens, temperature=temperature)
         return response
     except Exception as e:
-        raise gr.Error(f"❌ Erreur API Hugging Face : {str(e)}")
+        raise gr.Error(f"❌ Erreur d'appel API Hugging Face : {str(e)}")
+
+# Chargement du modèle de sentiment pour analyser les réponses
+classifier = pipeline("sentiment-analysis", model="mrm8488/distilroberta-finetuned-financial-news-sentiment-analysis")
+
+# Modèles de traduction (optionnels, désactivés pour optimisation)
+translator_to_en = pipeline("translation", model="Helsinki-NLP/opus-mt-mul-en")

-#
+# Fonction pour traduire un texte long (désactivée pour éviter l'erreur NLTK)
+def safe_translate_to_fr(text, max_length=512):
+    return "Traduction désactivée pour l'instant pour éviter les problèmes de dépendance NLTK."
+
+# Fonction pour suggérer le meilleur modèle
 def suggest_model(text):
     word_count = len(text.split())
     if word_count < 50:
@@ -54,33 +46,42 @@ def suggest_model(text):
     else:
         return "Précis"

-#
+# Fonction pour créer une jauge de sentiment
 def create_sentiment_gauge(sentiment, score):
     score_percentage = score * 100
-
-
+    if sentiment.lower() == "neutral":
+        color = "#A9A9A9"
+    elif sentiment.lower() == "positive":
         color = "#2E8B57"
     elif sentiment.lower() == "negative":
         color = "#DC143C"
+    else:
+        color = "#A9A9A9"

     html = f"""
     <div style='width: 100%; max-width: 300px; margin: 10px 0;'>
-        <div style='background-color: #D3D3D3; border-radius: 5px; height: 20px; position: relative;'>
-            <div style='background-color: {color}; width: {score_percentage}%; height: 100%; border-radius: 5px;'
-
+        <div style='background-color: #D3D3D3; border-radius: 5px; height: 20px; position: relative; box-shadow: 0 2px 4px rgba(0,0,0,0.2);'>
+            <div style='background-color: {color}; width: {score_percentage}%; height: 100%; border-radius: 5px; transition: width 0.3s ease-in-out;'>
+            </div>
+            <span style='position: absolute; top: 0; left: 50%; transform: translateX(-50%); color: #0A1D37; font-size: 12px; line-height: 20px; font-weight: 600;'>
+                {score_percentage:.1f}%
+            </span>
+        </div>
+        <div style='text-align: center; font-size: 14px; margin-top: 5px; color: #E0E0E0;'>
+            Sentiment: {sentiment}
         </div>
-        <div style='text-align: center; margin-top: 5px;'>Sentiment : {sentiment}</div>
     </div>
     """
     return html

-#
+# Fonction d'analyse corrigée
 async def full_analysis(text, mode, detail_mode, count, history):
     if not text:
         yield "Entrez une phrase.", "", "", "", 0, history, "", "Aucune analyse effectuée."
         return

-
+    # Message de progression
+    yield "Analyse en cours... (Étape 1 : Détection de la langue)", "", "", "", count, history, "", "Étape 1 : Détection de la langue"

     try:
         lang = detect(text)
@@ -92,15 +93,17 @@ async def full_analysis(text, mode, detail_mode, count, history):
         else:
             text_en = text

-        yield "Analyse en cours... (Étape 2 : Analyse du sentiment)", "", "", "", count, history, "", "Analyse du sentiment"
+        yield "Analyse en cours... (Étape 2 : Analyse du sentiment)", "", "", "", count, history, "", "Étape 2 : Analyse du sentiment"

+        # Analyse du sentiment avec RoBERTa sur le texte d'entrée
         result = await asyncio.to_thread(classifier, text_en)
         result = result[0]
         sentiment_output = f"Sentiment prédictif : {result['label']} (Score: {result['score']:.2f})"
         sentiment_gauge = create_sentiment_gauge(result['label'], result['score'])

-        yield "Analyse en cours... (Étape 3 : Explication
+        yield "Analyse en cours... (Étape 3 : Explication de l'impact)", "", "", "", count, history, "", "Étape 3 : Explication de l'impact"

+        # Appel à Zephyr pour expliquer l'impact basé sur le sentiment
         explanation_prompt = f"""<|system|>
 You are a professional financial analyst AI with expertise in economic forecasting.
 </s>
@@ -109,13 +112,14 @@ Given the following question about a potential economic event: "{text}"

 The predicted sentiment for this event is: {result['label'].lower()}.

-Assume the event happens. Explain why this event would likely have a {result['label'].lower()} economic impact.
+Assume the event happens (e.g., if the question is "Will the Federal Reserve raise interest rates?", assume they do raise rates). Explain why this event would likely have a {result['label'].lower()} economic impact. Provide a concise explanation in one paragraph, focusing on the potential effects on the economy. {"Use simple language for a general audience." if detail_mode == "Normal" else "Use detailed financial terminology for an expert audience."}
 </s>
 <|assistant|>"""
         explanation_en = await call_zephyr_api(explanation_prompt, mode)

-        yield "Analyse en cours... (Étape 4 : Traduction
+        yield "Analyse en cours... (Étape 4 : Traduction)", "", "", "", count, history, "", "Étape 4 : Traduction"

+        # Traduction (désactivée pour éviter l'erreur NLTK)
         explanation_fr = safe_translate_to_fr(explanation_en)

         count += 1
@@ -127,9 +131,9 @@ Assume the event happens. Explain why this event would likely have a {result['label'].lower()} economic impact.
             "Explication_FR": explanation_fr
         })

-        yield sentiment_output, text, explanation_en, explanation_fr, count, history, sentiment_gauge, "
+        yield sentiment_output, text, explanation_en, explanation_fr, count, history, sentiment_gauge, "Analyse terminée."

-#
+# Fonction pour télécharger historique CSV
 def download_history(history):
     if not history:
         return None
@@ -138,62 +142,197 @@ def download_history(history):
     df.to_csv(file_path, index=False)
     return file_path

-#
+# Interface Gradio améliorée
 def launch_app():
     custom_css = """
-
+    @import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;600;700&display=swap');
+
     body {
         background: linear-gradient(135deg, #0A1D37 0%, #1A3C34 100%);
-        font-family: 'Inter', sans-serif;
         color: #E0E0E0;
-
+        font-family: 'Inter', sans-serif;
+        margin: 0;
+        padding: 30px;
     }
+
     .gr-box {
         background: #2A4A43 !important;
+        border-radius: 16px !important;
         border: 1px solid #FFD700 !important;
-
-        padding:
-
+        box-shadow: 0 8px 20px rgba(0, 0, 0, 0.5) !important;
+        padding: 30px !important;
+        margin: 20px 0 !important;
+        transition: transform 0.2s ease, box-shadow 0.3s ease !important;
+    }
+
+    .gr-box:hover {
+        transform: translateY(-5px) !important;
+        box-shadow: 0 12px 24px rgba(255, 215, 0, 0.4) !important;
+    }
+
+    .gr-textbox, .gr-dropdown {
+        background: #3A5A52 !important;
+        border: 2px solid #FFD700 !important;
+        border-radius: 10px !important;
+        color: #E0E0E0 !important;
+        font-size: 18px !important;
+        padding: 15px !important;
+        transition: border-color 0.3s ease, box-shadow 0.3s ease !important;
+    }
+
+    .gr-textbox:focus, .gr-dropdown:focus {
+        border-color: #FFD700 !important;
+        box-shadow: 0 0 12px rgba(255, 215, 0, 0.5) !important;
     }
+
     .gr-button {
-        background: linear-gradient(90deg, #FFD700
-        color: #0A1D37;
-
-        border:
-
-
-
+        background: linear-gradient(90deg, #FFD700 0%, #D4AF37 100%) !important;
+        color: #0A1D37 !important;
+        border: none !important;
+        border-radius: 10px !important;
+        padding: 15px 30px !important;
+        font-weight: 600 !important;
+        font-size: 18px !important;
+        transition: transform 0.1s ease, box-shadow 0.3s ease !important;
+        box-shadow: 0 4px 12px rgba(255, 215, 0, 0.3) !important;
     }
+
     .gr-button:hover {
-        transform: translateY(-
-        box-shadow: 0
+        transform: translateY(-3px) !important;
+        box-shadow: 0 8px 16px rgba(255, 215, 0, 0.5) !important;
+    }
+
+    h1, h2, h3 {
+        color: #FFD700 !important;
+        font-weight: 700 !important;
+        text-shadow: 0 2px 4px rgba(0, 0, 0, 0.3) !important;
+        animation: fadeIn 1s ease-in-out;
+    }
+
+    @keyframes fadeIn {
+        from { opacity: 0; transform: translateY(-10px); }
+        to { opacity: 1; transform: translateY(0); }
+    }
+
+    .gr-row {
+        margin: 25px 0 !important;
+    }
+
+    .gr-column {
+        padding: 20px !important;
+    }
+
+    label {
+        color: #FFD700 !important;
+        font-weight: 600 !important;
+        font-size: 18px !important;
+        margin-bottom: 10px !important;
+        display: flex !important;
+        align-items: center !important;
+    }
+
+    label::before {
+        font-family: "Font Awesome 6 Free";
+        font-weight: 900;
+        margin-right: 10px;
+    }
+
+    .gr-textbox label::before {
+        content: '\\f201';
+    }
+
+    .gr-html label::before {
+        content: '\\f080';
+    }
+
+    .gr-file label::before {
+        content: '\\f019';
+    }
+
+    .economic-question-section {
+        background: rgba(26, 60, 52, 0.95) !important;
+        border-radius: 16px;
+        padding: 30px;
+        margin: 25px 0;
+        box-shadow: 0 8px 20px rgba(0, 0, 0, 0.5);
+    }
+
+    .economic-question-section .gr-textbox {
+        background: rgba(46, 74, 67, 0.85) !important;
+        border: 2px solid #FFD700 !important;
+        box-shadow: 0 4px 12px rgba(255, 215, 0, 0.3) !important;
+        font-size: 20px !important;
+        padding: 20px !important;
+    }
+
+    .economic-question-section .gr-textbox:focus {
+        border-color: #FFD700 !important;
+        box-shadow: 0 0 14px rgba(255, 215, 0, 0.6) !important;
+    }
+
+    .options-section {
+        display: flex;
+        flex-direction: column;
+        gap: 20px;
+        margin-top: 20px;
+    }
+
+    .options-section .gr-dropdown {
+        width: 220px !important;
+    }
+
+    .options-section .gr-dropdown label::before {
+        content: '\\f0c9';
+    }
+
+    .progress-message {
+        color: #FFD700 !important;
+        font-style: italic;
+        margin-bottom: 15px;
     }
     """

     with gr.Blocks(theme=gr.themes.Base(), css=custom_css) as iface:
-        gr.Markdown("# 📈 Analyse Financière Premium
-        gr.Markdown("
+        gr.Markdown("# 📈 Analyse Financière Premium + Explication IA", elem_id="title")
+        gr.Markdown("Posez une question sur un événement économique. L'IA analyse le sentiment et explique l'impact.", elem_classes=["subtitle"])

         count = gr.State(0)
         history = gr.State([])

-        with gr.Row():
+        with gr.Row(elem_classes=["economic-question-section"]):
             with gr.Column(scale=2):
-
-
-
-
+                with gr.Column():
+                    input_text = gr.Textbox(
+                        lines=4,
+                        label="Question Économique"
+                    )
+            with gr.Column(scale=1, elem_classes=["options-section"]):
+                mode_selector = gr.Dropdown(
+                    choices=["Rapide", "Équilibré", "Précis"],
+                    value="Équilibré",
+                    label="Mode (longueur et style de réponse)"
+                )
+                detail_mode_selector = gr.Dropdown(
+                    choices=["Normal", "Expert"],
+                    value="Normal",
+                    label="Niveau de détail (simplicité ou technicité)"
+                )

-
-
+        with gr.Row():
+            analyze_btn = gr.Button("Analyser")
+            reset_graph_btn = gr.Button("Réinitialiser")
+            download_btn = gr.Button("Télécharger CSV")

         with gr.Row():
-
-
-
-
-
-
+            with gr.Column(scale=1):
+                progress_message = gr.Textbox(label="Progression", elem_classes=["progress-message"], interactive=False)
+                displayed_prompt = gr.Textbox(label="Votre question", interactive=False)
+                sentiment_output = gr.Textbox(label="Sentiment Prédictif")
+                sentiment_gauge = gr.HTML(label="Jauge de Sentiment")
+            with gr.Column(scale=2):
+                with gr.Row():
+                    explanation_output_en = gr.Textbox(label="Explication en Anglais")
+                    explanation_output_fr = gr.Textbox(label="Explication en Français")

         download_file = gr.File(label="Fichier CSV")

@@ -214,4 +353,4 @@ def launch_app():
     iface.launch(share=True)

 if __name__ == "__main__":
-    launch_app()
+    launch_app()
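
Note: the commit stubs out safe_translate_to_fr so the Space no longer calls nltk.download('punkt'), which is the dependency that was failing. If the French translation step is wanted again, one NLTK-free option is to split sentences with a regular expression before feeding them to the Helsinki-NLP model, reusing the loop from the previous version of the file. The snippet below is only a sketch under that assumption, not part of this commit:

# Sketch only (not part of this commit): an NLTK-free variant of safe_translate_to_fr.
# Assumes the Helsinki-NLP/opus-mt-en-fr pipeline is loaded, as in the previous version.
import re
from transformers import pipeline

translator_to_fr = pipeline("translation", model="Helsinki-NLP/opus-mt-en-fr")

def safe_translate_to_fr(text, max_length=512):
    # Rough sentence split on ., ! or ? followed by whitespace; avoids the punkt download.
    sentences = [s.strip() for s in re.split(r"(?<=[.!?])\s+", text) if s.strip()]
    translated_sentences = []
    for sentence in sentences:
        translated = translator_to_fr(sentence, max_length=max_length)[0]["translation_text"]
        translated_sentences.append(translated)
    return " ".join(translated_sentences)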
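
The hunks stop at new line 338, so the event wiring for analyze_btn, reset_graph_btn and download_btn (roughly new lines 339-352) is not shown. Based on the components defined above and the eight values that full_analysis yields, a minimal Gradio wiring would look something like the sketch below; it is hypothetical, and the actual lines in the file may differ:

# Hypothetical wiring, inferred from the visible components; the real lines are
# outside the diff context. full_analysis is a generator, so each yield updates
# the eight outputs in order.
analyze_btn.click(
    full_analysis,
    inputs=[input_text, mode_selector, detail_mode_selector, count, history],
    outputs=[sentiment_output, displayed_prompt, explanation_output_en,
             explanation_output_fr, count, history, sentiment_gauge, progress_message],
)
download_btn.click(download_history, inputs=[history], outputs=[download_file])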