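# Likely runtime dependencies for this Space (requirements.txt is not shown here, so this is an assumption):
# gradio, transformers, torch, sentencepiece, langdetect, huggingface_hub, pandas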
import gradio as gr
from transformers import pipeline
from langdetect import detect
from huggingface_hub import InferenceClient
import pandas as pd
import os
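
# Hugging Face API token, read from the environment (e.g. set as a Space secret)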
HF_TOKEN = os.getenv("HF_TOKEN")

# Call the Zephyr chat model through the Hugging Face Inference API
def call_zephyr_api(messages, hf_token=HF_TOKEN):
    client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token=hf_token)
    try:
        response = client.chat_completion(messages, max_tokens=300)
        return response.choices[0].message.content
    except Exception as e:
        raise gr.Error(f"❌ Erreur d'appel API Hugging Face : {str(e)}")

# Load the financial-news sentiment model
classifier = pipeline("sentiment-analysis", model="mrm8488/distilroberta-finetuned-financial-news-sentiment-analysis")

# Translation models (multilingual -> English, English -> French)
translator_to_en = pipeline("translation", model="Helsinki-NLP/opus-mt-mul-en")
translator_to_fr = pipeline("translation", model="Helsinki-NLP/opus-mt-en-fr")

# Suggest the best analysis mode based on the length of the input text
def suggest_model(text):
    word_count = len(text.split())
    if word_count < 50:
        return "Rapide"
    elif word_count <= 200:
        return "Équilibré"
    else:
        return "Précis"

# Full analysis: language detection, translation to English, sentiment, LLM explanation
def full_analysis(text, mode, detail_mode, count, history):
    if not text:
        return "Entrez une phrase.", "", "", count, history
    try:
        lang = detect(text)
    except Exception:
        lang = "unknown"
    # Translate to English if needed so the sentiment model receives English input
    if lang != "en":
        text = translator_to_en(text, max_length=512)[0]['translation_text']
    result = classifier(text)[0]
    sentiment_output = f"Sentiment : {result['label']} (Score: {result['score']:.2f})"
    messages = [
        {"role": "system", "content": "You are a professional financial analyst AI."},
        {"role": "user", "content": f"Analyze the following financial news carefully:\n\"{text}\"\n\nThe detected sentiment for this news is: {result['label'].lower()}.\n\nNow, explain why the sentiment is {result['label'].lower()} using a logical, fact-based explanation.\nBase your reasoning only on the given news text.\nDo not repeat the news text or the prompt.\nRespond only with your financial analysis in one clear paragraph.\nWrite in a clear and professional tone."}
    ]
    explanation_en = call_zephyr_api(messages)
    explanation_fr = translator_to_fr(explanation_en, max_length=512)[0]['translation_text']
    count += 1
    history.append({
        "Texte": text,
        "Sentiment": result['label'],
        "Score": f"{result['score']:.2f}",
        "Explication_EN": explanation_en,
        "Explication_FR": explanation_fr
    })
    return sentiment_output, explanation_en, explanation_fr, count, history

# Export the analysis history as a downloadable CSV file
def download_history(history):
    if not history:
        return None
    df = pd.DataFrame(history)
    file_path = "/tmp/analysis_history.csv"
    df.to_csv(file_path, index=False)
    return file_path

# Gradio interface
def launch_app():
    with gr.Blocks(theme=gr.themes.Base(), css="body {background-color: #0D1117; color: white;} .gr-button {background-color: #161B22; border: 1px solid #30363D;}") as iface:
        gr.Markdown("# 📈 Analyse Financière Premium + Explication IA", elem_id="title")
        gr.Markdown("Entrez une actualité financière. L'IA analyse et explique en anglais/français. Choisissez votre mode d'explication.")

        count = gr.State(0)
        history = gr.State([])

        with gr.Row():
            input_text = gr.Textbox(lines=4, placeholder="Entrez une actualité ici...", label="Texte à analyser")

        with gr.Row():
            mode_selector = gr.Dropdown(
                choices=["Rapide", "Équilibré", "Précis"],
                value="Équilibré",
                label="Mode recommandé selon la taille"
            )
            detail_mode_selector = gr.Dropdown(
                choices=["Normal", "Expert"],
                value="Normal",
                label="Niveau de détail"
            )

        analyze_btn = gr.Button("Analyser")
        reset_graph_btn = gr.Button("Reset Graphique")  # not wired to a callback yet
        download_btn = gr.Button("Télécharger CSV")

        with gr.Row():
            sentiment_output = gr.Textbox(label="Résultat du Sentiment")

        with gr.Row():
            with gr.Column():
                explanation_output_en = gr.Textbox(label="Explication en Anglais")
            with gr.Column():
                explanation_output_fr = gr.Textbox(label="Explication en Français")

        download_file = gr.File(label="Fichier CSV")

        # Suggest the best mode whenever the input text changes
        input_text.change(lambda t: gr.update(value=suggest_model(t)), inputs=[input_text], outputs=[mode_selector])

        analyze_btn.click(
            full_analysis,
            inputs=[input_text, mode_selector, detail_mode_selector, count, history],
            outputs=[sentiment_output, explanation_output_en, explanation_output_fr, count, history]
        )

        download_btn.click(
            download_history,
            inputs=[history],
            outputs=[download_file]
        )

    iface.launch()

if __name__ == "__main__":
    launch_app()