import gradio as gr
from transformers import pipeline
from langdetect import detect
from huggingface_hub import InferenceClient
import pandas as pd
import os
HF_TOKEN = os.getenv("HF_TOKEN")
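# NOTE: HF_TOKEN is assumed to be provided through the environment (for example a
# Space secret); without it, Inference API calls may fail or be rate-limited.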
# Call the Zephyr model through the Hugging Face Inference API
def call_zephyr_api(prompt, hf_token=HF_TOKEN):
    client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token=hf_token)
    try:
        response = client.text_generation(prompt, max_new_tokens=300)
        return response
    except Exception as e:
        raise gr.Error(f"❌ Erreur d'appel API Hugging Face : {str(e)}")
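# Illustrative usage only (assumes a valid HF_TOKEN is configured):
#   call_zephyr_api("<|system|>\nYou are a financial analyst.</s>\n<|user|>\nSummarise: rates rose.</s>\n<|assistant|>")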
# Load the sentiment-analysis model fine-tuned on financial news
classifier = pipeline("sentiment-analysis", model="mrm8488/distilroberta-finetuned-financial-news-sentiment-analysis")

# Translation models (any language -> English, English -> French)
translator_to_en = pipeline("translation", model="Helsinki-NLP/opus-mt-mul-en")
translator_to_fr = pipeline("translation", model="Helsinki-NLP/opus-mt-en-fr")
# Suggest the most suitable analysis mode based on the length of the text
def suggest_model(text):
    word_count = len(text.split())
    if word_count < 50:
        return "Rapide"
    elif word_count <= 200:
        return "Équilibré"
    else:
        return "Précis"
# Main analysis function: detect the language, classify the sentiment and ask Zephyr for an explanation.
# `mode` and `detail_mode` come from the UI but are not yet used in the prompt.
def full_analysis(text, mode, detail_mode, count, history):
    if not text:
        return "Entrez une phrase.", "", "", count, history
    try:
        lang = detect(text)
    except Exception:
        lang = "unknown"
    # Translate non-English input to English before running the classifier
    if lang != "en":
        text = translator_to_en(text, max_length=512)[0]['translation_text']
    result = classifier(text)[0]
    sentiment_output = f"Sentiment : {result['label']} (Score: {result['score']:.2f})"
    # Prompt built with Zephyr's <|system|> / <|user|> / <|assistant|> chat template
    prompt = f"""<|system|>
You are a professional financial analyst AI.
</s>
<|user|>
Analyze the following financial news carefully:
"{text}"
The detected sentiment for this news is: {result['label'].lower()}.
Now, explain why the sentiment is {result['label'].lower()} using a logical, fact-based explanation.
Base your reasoning only on the given news text.
Do not repeat the news text or the prompt.
Respond only with your financial analysis in one clear paragraph.
Write in a clear and professional tone.
</s>
<|assistant|>"""
    explanation_en = call_zephyr_api(prompt)
    explanation_fr = translator_to_fr(explanation_en, max_length=512)[0]['translation_text']
    count += 1
    history.append({
        "Texte": text,
        "Sentiment": result['label'],
        "Score": f"{result['score']:.2f}",
        "Explication_EN": explanation_en,
        "Explication_FR": explanation_fr
    })
    return sentiment_output, explanation_en, explanation_fr, count, history
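# The order of the returned tuple must match the `outputs` list wired to analyze_btn.click below.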
# Export the analysis history as a CSV file for download
def download_history(history):
    if not history:
        return None
    df = pd.DataFrame(history)
    file_path = "/tmp/analysis_history.csv"
    df.to_csv(file_path, index=False)
    return file_path
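# The file is written under /tmp, which is typically writable in hosted environments such as Spaces.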
# Gradio interface
def launch_app():
    with gr.Blocks(theme=gr.themes.Base(), css="body {background-color: #0D1117; color: white;} .gr-button {background-color: #161B22; border: 1px solid #30363D;}") as iface:
        gr.Markdown("# 📈 Analyse Financière Premium + Explication IA", elem_id="title")
        gr.Markdown("Entrez une actualité financière. L'IA analyse et explique en anglais/français. Choisissez votre mode d'explication.")

        count = gr.State(0)
        history = gr.State([])

        with gr.Row():
            input_text = gr.Textbox(lines=4, placeholder="Entrez une actualité ici...", label="Texte à analyser")
        with gr.Row():
            mode_selector = gr.Dropdown(
                choices=["Rapide", "Équilibré", "Précis"],
                value="Équilibré",
                label="Mode recommandé selon la taille"
            )
            detail_mode_selector = gr.Dropdown(
                choices=["Normal", "Expert"],
                value="Normal",
                label="Niveau de détail"
            )

        analyze_btn = gr.Button("Analyser")
        reset_graph_btn = gr.Button("Reset Graphique")
        download_btn = gr.Button("Télécharger CSV")

        with gr.Row():
            sentiment_output = gr.Textbox(label="Résultat du Sentiment")
        with gr.Row():
            with gr.Column():
                explanation_output_en = gr.Textbox(label="Explication en Anglais")
            with gr.Column():
                explanation_output_fr = gr.Textbox(label="Explication en Français")

        download_file = gr.File(label="Fichier CSV")

        # Suggest an analysis mode automatically whenever the input text changes
        input_text.change(lambda t: gr.update(value=suggest_model(t)), inputs=[input_text], outputs=[mode_selector])

        analyze_btn.click(
            full_analysis,
            inputs=[input_text, mode_selector, detail_mode_selector, count, history],
            outputs=[sentiment_output, explanation_output_en, explanation_output_fr, count, history]
        )

        download_btn.click(
            download_history,
            inputs=[history],
            outputs=[download_file]
        )

    iface.launch()


if __name__ == "__main__":
    launch_app()