Update app.py

app.py CHANGED
@@ -5,19 +5,19 @@ import torch
 
 # Load the model and the tokenizer
 tokenizer = AutoTokenizer.from_pretrained('dicta-il/dictalm-7b-instruct')
-model = AutoModelForCausalLM.from_pretrained('dicta-il/dictalm-7b-instruct', trust_remote_code=True)
+model = AutoModelForCausalLM.from_pretrained('dicta-il/dictalm-7b-instruct', trust_remote_code=True)
 
 # Define the chat function for the model
 def chat_with_model(prompt):
     model.eval()
     with torch.inference_mode():
         kwargs = dict(
-            inputs=tokenizer(prompt, return_tensors='pt').input_ids
+            inputs=tokenizer(prompt, return_tensors='pt').input_ids,
             do_sample=True,
             top_k=50,
             top_p=0.95,
-            temperature=0.
-            max_length=
+            temperature=0.5,  # lowered the temperature to reduce randomness
+            max_length=50,  # reduced the maximum to a smaller value
             min_new_tokens=5
         )
         output = model.generate(**kwargs)
@@ -26,4 +26,4 @@ def chat_with_model(prompt):
 
 # Create an interface with Gradio
 interface = gr.Interface(fn=chat_with_model, inputs="text", outputs="text", title="Chat with DictaLM Model")
-interface.launch()
+interface.launch()
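For context, below is a minimal self-contained sketch of what the patched app.py plausibly looks like as a whole. The import lines, the decode step, and the return statement sit outside the hunks shown above, so those parts are assumptions based on the usual transformers/Gradio pattern; everything else mirrors the diff.

import gradio as gr  # assumed; gr is used below but never shown in the hunks
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM  # assumed import

# Load the model and the tokenizer
tokenizer = AutoTokenizer.from_pretrained('dicta-il/dictalm-7b-instruct')
model = AutoModelForCausalLM.from_pretrained('dicta-il/dictalm-7b-instruct', trust_remote_code=True)

# Define the chat function for the model
def chat_with_model(prompt):
    model.eval()
    with torch.inference_mode():
        kwargs = dict(
            inputs=tokenizer(prompt, return_tensors='pt').input_ids,
            do_sample=True,
            top_k=50,
            top_p=0.95,
            temperature=0.5,  # lowered the temperature to reduce randomness
            max_length=50,    # reduced the maximum to a smaller value
            min_new_tokens=5,
        )
        output = model.generate(**kwargs)
    # Lines 24-25 are elided in the diff; decoding the first returned sequence is the usual pattern.
    return tokenizer.decode(output[0], skip_special_tokens=True)

# Create an interface with Gradio
interface = gr.Interface(fn=chat_with_model, inputs="text", outputs="text", title="Chat with DictaLM Model")
interface.launch()

One caveat worth noting, as a side observation rather than part of this commit: in transformers, max_length=50 counts the prompt tokens as well as the generated ones, so a prompt longer than about 45 tokens leaves min_new_tokens=5 little or no room. Using max_new_tokens instead would bound only the generated continuation.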