Update app.py
app.py
CHANGED
@@ -2,29 +2,29 @@ import gradio as gr
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
 
-MODEL_NAME = "Qwen/
+MODEL_NAME = "Qwen/Qwen1.5-4B"
 
 # Load the model and tokenizer
 tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
 model = AutoModelForCausalLM.from_pretrained(
     MODEL_NAME,
-    torch_dtype=torch.float32
+    torch_dtype=torch.float32  # runs on CPU
 )
 
 # Chatbot function
 def chat_with_qwen(prompt):
     inputs = tokenizer(prompt, return_tensors="pt")
-    output = model.generate(**inputs, max_new_tokens=
+    output = model.generate(**inputs, max_new_tokens=100)
     response = tokenizer.decode(output[0], skip_special_tokens=True)
     return response
 
-#
+# Gradio interface
 iface = gr.Interface(
     fn=chat_with_qwen,
     inputs=gr.Textbox(lines=2, placeholder="Type your question here..."),
     outputs="text",
-    title="Qwen
-    description="Chatbot
+    title="Qwen 1.5 4B Chatbot",
+    description="A chatbot with a lighter model for free-tier resources",
 )
 
 iface.launch()
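A side note on the chosen checkpoint: Qwen/Qwen1.5-4B is the base completion model, so generate() will continue the raw prompt rather than answer it conversationally, and decoding output[0] echoes the prompt back in the response. A minimal sketch of how the handler could target the chat variant via tokenizer.apply_chat_template instead; the Qwen/Qwen1.5-4B-Chat model name and the restructured function are assumptions, not part of this commit:

from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Assumed chat checkpoint, not the one this commit pins.
MODEL_NAME = "Qwen/Qwen1.5-4B-Chat"

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype=torch.float32)

def chat_with_qwen(prompt):
    # Wrap the user prompt in the model's chat template before generating.
    messages = [{"role": "user", "content": prompt}]
    text = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    inputs = tokenizer(text, return_tensors="pt")
    output = model.generate(**inputs, max_new_tokens=100)
    # Decode only the newly generated tokens so the templated prompt is dropped.
    new_tokens = output[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)

Slicing off the input length before decoding is what keeps the echoed prompt out of the reply; the commit's current decode of the full output[0] would return it.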