Upload 4 files

Files changed:
- README.md (+7 −4)
- app.py (+108 −47)
- gitattributes (+35 −0)
- requirements.txt (+3 −1)
README.md
CHANGED

```diff
@@ -1,13 +1,16 @@
 ---
-title: Math
+title: Qwen2.5 Math 7B Instruct GGUF
-emoji:
+emoji: 📈
-colorFrom:
+colorFrom: blue
 colorTo: purple
 sdk: gradio
 sdk_version: 5.0.1
 app_file: app.py
 pinned: false
-license:
+license: gpl-3.0
+short_description: Runs Qwen2.5-Math-7B-Instruct-GGUF
+models:
+- bartowski/Qwen2.5-Math-7B-Instruct-GGUF
 ---
 
 An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
```
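The new `models:` front matter links the Space to the GGUF repo that app.py pulls from. As a quick sanity check (a sketch using `huggingface_hub`'s public API, not part of the Space), you can confirm that the quant file referenced in app.py actually exists in that repo:

```python
from huggingface_hub import list_repo_files

# List the GGUF quants published in the linked model repo; the Q6_K_L file
# downloaded by app.py should appear among them.
files = list_repo_files("bartowski/Qwen2.5-Math-7B-Instruct-GGUF")
print([f for f in files if f.endswith(".gguf")])
```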
app.py
CHANGED

The old app.py was the stock Gradio chatbot starter: a streaming `respond(message, history, system_message, max_tokens, temperature, top_p)` generator backed by the Hugging Face Inference API, wrapped in a `gr.ChatInterface` and launched with `demo.launch()`. It is replaced wholesale (`@@ -1,64 +1,125 @@`) by a llama-cpp-python app:

```python
import json        # unused in the current version, kept from the upload
import spaces      # Hugging Face Spaces helper
import subprocess  # unused in the current version, kept from the upload
import gradio as gr
from llama_cpp import Llama
from huggingface_hub import hf_hub_download

# Render KaTeX display math inline inside the answer panel
CSS = """
#qwen-md .katex-display { display: inline; }
#qwen-md .katex-display>.katex { display: inline; }
#qwen-md .katex-display>.katex>.katex-html { display: inline; }
"""

# Fetch the Q6_K_L quant of Qwen2.5-Math-7B-Instruct at startup
hf_hub_download(
    repo_id="bartowski/Qwen2.5-Math-7B-Instruct-GGUF",
    filename="Qwen2.5-Math-7B-Instruct-Q6_K_L.gguf",
    local_dir="./models",
)

llm = Llama(
    model_path="models/Qwen2.5-Math-7B-Instruct-Q6_K_L.gguf",
    flash_attn=True,
    n_ctx=8192,
    n_batch=1024,
    chat_format="chatml",
)

# Gradio components
output_md = gr.Markdown(
    label="Answer",
    value="Answer will be presented here",
    latex_delimiters=[
        {"left": "\\(", "right": "\\)", "display": True},
        {"left": "\\begin{equation}", "right": "\\end{equation}", "display": True},
        {"left": "\\begin{align}", "right": "\\end{align}", "display": True},
        {"left": "\\begin{alignat}", "right": "\\end{alignat}", "display": True},
        {"left": "\\begin{gather}", "right": "\\end{gather}", "display": True},
        {"left": "\\begin{CD}", "right": "\\end{CD}", "display": True},
        {"left": "\\[", "right": "\\]", "display": True},
    ],
    elem_id="qwen-md",
    show_copy_button=True,
    container=True,
    render=False,
)
target_lang = gr.Dropdown(
    choices=["Chinese", "English"],
    value="Chinese",
    label="Output Language",
    interactive=True,
    render=False,
)
new_tokens = gr.Slider(
    minimum=1, maximum=8192, value=2048, step=1, label="Max new tokens", render=False
)
temperature = gr.Slider(
    minimum=0, maximum=2.0, value=0.5, step=0.1, label="Temperature", render=False
)
top_p = gr.Slider(minimum=0.0, maximum=1.0, value=0.95, step=0.05, label="Top P", render=False)
input_text = gr.Textbox(label="Ask math questions here", render=False)
submit_btn = gr.Button(value="Ask", render=False)
banner = gr.Markdown(value="""
# 📖 Qwen2.5-Math GGUF
This WebUI is based on Qwen2.5-Math-7B-Instruct-GGUF for mathematical reasoning. You can input texts of mathematical or arithmetic problems.
""")


# Gradio callback
def respond(
    input_text,
    lang="Chinese",
    max_tokens=2048,
    temperature=0.5,
    top_p=0.95,
):
    if lang == "Chinese":
        # "You are a helpful math assistant. Answer in Chinese."
        sys_msg = "你是一个乐于助人的数学助手. 你使用中文回答问题"
    else:
        sys_msg = "You are a helpful math assistant. You should always provide your answer in English."
    messages = [
        {"role": "system", "content": sys_msg},
        {"role": "user", "content": input_text},
    ]

    response = llm.create_chat_completion(
        messages=messages,
        stream=True,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
    )
    # Accumulate streamed deltas and yield the running Markdown answer
    message_repl = ""
    for chunk in response:
        delta = chunk["choices"][0]["delta"]
        if "content" in delta:
            message_repl += delta["content"]
            yield message_repl


with gr.Blocks(css=CSS, theme="NoCrypt/miku") as demo:
    submit_btn.click(
        fn=respond,
        inputs=[input_text, target_lang, new_tokens, temperature, top_p],
        outputs=output_md,
    )
    with gr.Column():
        banner.render()
        with gr.Row():
            with gr.Column():
                input_text.render()
                target_lang.render()
                new_tokens.render()
                temperature.render()
                top_p.render()
                submit_btn.render()
            with gr.Column():
                output_md.render()

if __name__ == "__main__":
    demo.launch()
```
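Because `respond()` is an ordinary Python generator, it can be smoke-tested without launching the UI. A minimal sketch (assumes the model download above has completed; the question is just an example):

```python
# Drive the generator directly; each yielded value is the accumulated
# Markdown answer so far, so the last one is the full reply.
answer = ""
for answer in respond("Compute 12 * 17.", lang="English", max_tokens=256):
    pass
print(answer)
```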
gitattributes
ADDED

```text
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
```
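These are the standard Hugging Face LFS rules: any file matching one of the patterns is stored through Git LFS rather than in plain Git. Note that `*.gguf` is not on the list; the model weights are never committed, since app.py downloads the GGUF at startup. As a rough illustration (the patterns behave approximately like shell globs; real gitattributes matching has additional path rules):

```python
from fnmatch import fnmatch

# A few of the rules above, applied to hypothetical file names.
lfs_patterns = ["*.bin", "*.safetensors", "*.zip"]

def uses_lfs(name: str) -> bool:
    return any(fnmatch(name, pattern) for pattern in lfs_patterns)

print(uses_lfs("model.safetensors"))  # True  -> stored via Git LFS
print(uses_lfs("app.py"))             # False -> stored as plain text in Git
```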
requirements.txt
CHANGED

```diff
@@ -1 +1,3 @@
-huggingface_hub
+huggingface_hub
+scikit-build-core
+llama-cpp-python
```
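`llama-cpp-python` is compiled from source when the Space image is built, and `scikit-build-core` is its build backend, which is presumably why it is listed here explicitly. A quick import check for the new dependencies (a minimal sketch, not part of the Space):

```python
# The PyPI package llama-cpp-python installs under the import name llama_cpp.
import huggingface_hub
import llama_cpp

print("huggingface_hub", huggingface_hub.__version__)
print("llama-cpp-python", llama_cpp.__version__)
```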