import os

import gradio as gr
from dotenv import load_dotenv
from openai import OpenAI

# Load environment variables (expects HF_TOKEN) from a local .env file.
load_dotenv()

# Hugging Face's OpenAI-compatible Inference API endpoint; authenticated
# with the HF_TOKEN read from the environment above.
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.getenv("HF_TOKEN"),
)
|
|
|
def generate_response(message: str, temperature: float = 0.7) -> str:
    """Send *message* to the Sacha-Mistral model and return its reply.

    Args:
        message: The user's prompt, sent as a single-turn chat message.
        temperature: Sampling temperature passed through to the API
            (higher values produce more varied output).

    Returns:
        The model's reply text, or a French error message if the API
        call fails for any reason.
    """
    try:
        response = client.chat.completions.create(
            model="Nac31/Sacha-Mistral-0",
            messages=[{"role": "user", "content": message}],
            temperature=temperature,
            max_tokens=500,
            stream=False,
        )
        return response.choices[0].message.content
    except Exception as e:
        # UI boundary: show the error in the Gradio output box instead of
        # crashing the app. Intentionally broad for a user-facing demo.
        return f"Une erreur s'est produite : {str(e)}"
|
|
|
|
|
# Gradio UI: one text input plus a temperature slider, one text output.
# The slider's default (0.7) mirrors generate_response's default.
demo = gr.Interface(
    fn=generate_response,
    inputs=[
        gr.Textbox(label="Votre message", placeholder="Entrez votre message ici..."),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.7, label="Température"),
    ],
    outputs=gr.Textbox(label="Réponse"),
    title="Chat avec Sacha-Mistral",
    description="Un assistant conversationnel en français basé sur le modèle Sacha-Mistral",
)
|
|
|
if __name__ == "__main__":
    # Launch the Gradio development server when run as a script.
    demo.launch()