Update app
app.py
CHANGED
@@ -24,12 +24,12 @@ model = AutoModelForCausalLM.from_pretrained(
 )
 pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
 
-def generate_response(message, temperature=0.7):
+def generate_response(message, temperature=0.7, max_new_tokens=500):
     try:
         response = pipe(
             message,
             temperature=temperature,
-            max_new_tokens=
+            max_new_tokens=max_new_tokens,
             do_sample=True
         )
         return response[0]['generated_text']
@@ -41,7 +41,8 @@ demo = gr.Interface(
     fn=generate_response,
     inputs=[
         gr.Textbox(label="Votre message", placeholder="Entrez votre message ici..."),
-        gr.Slider(minimum=0.1, maximum=1.0, value=0.7, label="Température")
+        gr.Slider(minimum=0.1, maximum=1.0, value=0.7, label="Température"),
+        gr.Slider(minimum=10, maximum=500, value=50, label="Nombre de tokens")
     ],
     outputs=gr.Textbox(label="Réponse"),
     title="Chat avec Sacha-Mistral",
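For reference, this is how the new input reaches the updated function at run time: gr.Interface passes the component values to fn positionally, so the Textbox fills message, the first Slider fills temperature, and the newly added "Nombre de tokens" Slider fills max_new_tokens. The call below is only an illustration (not part of the commit), using the widget default values and assuming the app.py shown above is in scope.

# Illustrative call, mirroring what Gradio sends when the user keeps the defaults.
answer = generate_response(
    "Bonjour, peux-tu te présenter ?",  # value from gr.Textbox "Votre message"
    temperature=0.7,                    # value from gr.Slider "Température" (default 0.7)
    max_new_tokens=50,                  # value from gr.Slider "Nombre de tokens" (default 50)
)
print(answer)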