Tonic committed on
Commit
afdb985
·
1 Parent(s): 199a31d

Delete app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -303
app.py DELETED
@@ -1,303 +0,0 @@
1
# Markdown header rendered at the top of the Gradio demo (mixed English/Chinese,
# kept verbatim — this text is user-facing runtime data).
description = """# 🙋🏻‍♂️Welcome to🌟Tonic's🦆Qwen-VL-Chat🤩Bot!🚀
本WebUI基于Qwen-VL-Chat打造,实现聊天机器人功能 但我必须修复它这么多也许我也得到一些荣誉
Qwen-VL-Chat is a multimodal input model. You can use this Space to test out the current model [qwen/Qwen-VL-Chat](https://huggingface.co/qwen/Qwen-VL-Chat) You can also use 🧑🏻‍🚀qwen/Qwen-VL-Chat🚀 by cloning this space. 🧬🔬🔍 Simply click here: <a style="display:inline-block" href="https://huggingface.co/spaces/Tonic1/VLChat?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></h3>
Join us : 🌟TeamTonic🌟 is always making cool demos! Join our active builder's🛠️community on 👻Discord: [Discord](https://discord.gg/nXx5wbX9) On 🤗Huggingface: [TeamTonic](https://huggingface.co/TeamTonic) & [MultiTransformer](https://huggingface.co/MultiTransformer) On 🌐Github: [Polytonic](https://github.com/tonic-ai) & contribute to 🌟 [PolyGPT](https://github.com/tonic-ai/polygpt-alpha)
"""
# License / content-policy notice rendered at the bottom of the demo
# (English + Chinese, kept verbatim).
disclaimer = """
Note: This demo is governed by the original license of Qwen-VL.
We strongly advise users not to knowingly generate or allow others to knowingly generate harmful content,
including hate speech, violence, pornography, deception, etc.
(注:本演示受Qwen-VL的许可协议限制。我们强烈建议,用户不应传播及不应允许他人传播以下内容,
包括但不限于仇恨言论、暴力、色情、欺诈相关的有害信息。)"""
12
-
13
import copy
import os
import re
import secrets
import shutil
import tempfile
from argparse import ArgumentParser, Namespace
from pathlib import Path

import gradio as gr

from modelscope import AutoModelForCausalLM, AutoTokenizer, GenerationConfig, snapshot_download
23
-
24
# --- Global configuration ---
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'  # expose both GPUs to the process
DEFAULT_CKPT_PATH = 'qwen/Qwen-VL-Chat'  # modelscope model id used when --checkpoint-path is absent
REVISION = 'v1.0.4'  # model revision pinned for reproducibility
BOX_TAG_PATTERN = r"<box>([\s\S]*?)</box>"  # matches <box>...</box> spans in model output
# Trailing punctuation (ASCII + fullwidth/CJK forms) stripped from queries in add_text().
PUNCTUATION = "!?。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏."
# Directory where uploaded/generated images are copied; honors GRADIO_TEMP_DIR.
uploaded_file_dir = os.environ.get("GRADIO_TEMP_DIR") or str(Path(tempfile.gettempdir()) / "gradio")
# Both are populated by _load_model_tokenizer() at startup.
tokenizer = None
model = None
33
-
34
def _get_args() -> Namespace:
    """Parse command-line options for the demo.

    Returns:
        argparse.Namespace with the checkpoint path/revision, device choice,
        and Gradio server options.

    Note: the return annotation was fixed — parse_args() returns a
    Namespace, not an ArgumentParser.
    """
    parser = ArgumentParser()
    parser.add_argument("-c", "--checkpoint-path", type=str, default=DEFAULT_CKPT_PATH,
                        help="Checkpoint name or path, default to %(default)r")
    parser.add_argument("--revision", type=str, default=REVISION)
    parser.add_argument("--cpu-only", action="store_true", help="Run demo with CPU only")

    parser.add_argument("--share", action="store_true", default=False,
                        help="Create a publicly shareable link for the interface.")
    parser.add_argument("--inbrowser", action="store_true", default=False,
                        help="Automatically launch the interface in a new tab on the default browser.")
    parser.add_argument("--server-port", type=int, default=8000,
                        help="Demo server port.")
    parser.add_argument("--server-name", type=str, default="127.0.0.1",
                        help="Demo server name.")

    args = parser.parse_args()
    return args
52
-
53
def handle_image_submission(_chatbot, task_history, file) -> tuple:
    """Handle the "Submit Image" button: store the upload and run the model.

    Args:
        _chatbot: Gradio chatbot history (list of (user, bot) pairs).
        task_history: parallel history kept for the model.
        file: uploaded file object from gr.UploadButton, or None.

    Returns:
        (updated chatbot history, updated task history).
    """
    print("handle_image_submission called")
    if file is None:
        print("No file uploaded")
        return _chatbot, task_history
    print("File received:", file)
    file_path = save_image(file, uploaded_file_dir)
    print("File saved at:", file_path)
    history_item = ((file_path,), None)
    _chatbot.append(history_item)
    task_history.append(history_item)
    # BUG FIX: predict() takes only (_chatbot, task_history) — it was called
    # with 4 args. Also return task_history so the click handler's two
    # declared outputs are both satisfied.
    return predict(_chatbot, task_history), task_history
65
-
66
-
67
def _load_model_tokenizer(args) -> tuple:
    """Download the checkpoint and initialize the global tokenizer and model.

    Args:
        args: parsed CLI namespace (checkpoint_path, revision, cpu_only).

    Returns:
        (model, tokenizer) — also stored in the module-level globals.
    """
    global tokenizer, model

    checkpoint_dir = snapshot_download(args.checkpoint_path, revision=args.revision)

    tokenizer = AutoTokenizer.from_pretrained(
        checkpoint_dir, trust_remote_code=True, resume_download=True,
    )

    # Run fully on CPU only when explicitly requested; otherwise let the
    # framework spread the model across available devices.
    device_map = "cpu" if args.cpu_only else "auto"

    model = AutoModelForCausalLM.from_pretrained(
        checkpoint_dir,
        device_map=device_map,
        trust_remote_code=True,
        bf16=True,
        resume_download=True,
    ).eval()
    model.generation_config = GenerationConfig.from_pretrained(
        checkpoint_dir, trust_remote_code=True, resume_download=True,
    )

    return model, tokenizer
92
-
93
-
94
- def _parse_text(text: str) -> str:
95
- lines = text.split("\n")
96
- lines = [line for line in lines if line != ""]
97
- count = 0
98
- for i, line in enumerate(lines):
99
- if "```" in line:
100
- count += 1
101
- items = line.split("`")
102
- if count % 2 == 1:
103
- lines[i] = f'<pre><code class="language-{items[-1]}">'
104
- else:
105
- lines[i] = f"<br></code></pre>"
106
- else:
107
- if i > 0:
108
- if count % 2 == 1:
109
- line = line.replace("`", r"\`")
110
- line = line.replace("<", "&lt;")
111
- line = line.replace(">", "&gt;")
112
- line = line.replace(" ", "&nbsp;")
113
- line = line.replace("*", "&ast;")
114
- line = line.replace("_", "&lowbar;")
115
- line = line.replace("-", "&#45;")
116
- line = line.replace(".", "&#46;")
117
- line = line.replace("!", "&#33;")
118
- line = line.replace("(", "&#40;")
119
- line = line.replace(")", "&#41;")
120
- line = line.replace("$", "&#36;")
121
- lines[i] = "<br>" + line
122
- text = "".join(lines)
123
- return text
124
-
125
def save_image(image_file, upload_dir: str) -> str:
    """Copy an uploaded file into *upload_dir* under a random name.

    Args:
        image_file: file-like object whose ``.name`` is a readable path.
        upload_dir: destination directory (created if missing).

    Returns:
        The destination path as a string.
    """
    print("save_image called with:", image_file)
    target_dir = Path(upload_dir)
    target_dir.mkdir(parents=True, exist_ok=True)
    source = Path(image_file.name)
    destination = target_dir / (secrets.token_hex(10) + source.suffix)
    print("Saving to:", destination)
    destination.write_bytes(source.read_bytes())
    return str(destination)
134
-
135
-
136
def add_file(history, task_history, file):
    """Append an uploaded file to both histories without invoking the model.

    NOTE(review): this definition is shadowed by a later ``add_file`` in the
    original file; kept as-is for compatibility.

    Returns:
        (history, task_history), unchanged when no file was uploaded.
    """
    if file is None:
        return history, task_history
    # BUG FIX: save_image() requires the upload directory as its second
    # argument; it was being called with only one.
    file_path = save_image(file, uploaded_file_dir)
    history = history + [((file_path,), None)]
    task_history = task_history + [((file_path,), None)]
    return history, task_history
143
-
144
-
145
def predict(_chatbot, task_history) -> list:
    """Run the model on the latest chat turn and write the reply into _chatbot.

    Uses the module-level ``tokenizer`` and ``model`` globals. The last
    _chatbot entry's query is either plain text or a 1-tuple with an image path.

    Returns:
        The updated _chatbot history list (empty input is returned untouched).
    """
    print("predict called")
    if not _chatbot:
        return _chatbot
    chat_query = _chatbot[-1][0]
    print("Chat query:", chat_query)

    if isinstance(chat_query, tuple):
        query = [{'image': chat_query[0]}]
    else:
        query = [{'text': _parse_text(chat_query)}]

    print("Query for model:", query)
    inputs = tokenizer.from_list_format(query)
    tokenized_inputs = tokenizer(inputs, return_tensors='pt')
    tokenized_inputs = tokenized_inputs.to(model.device)

    pred = model.generate(**tokenized_inputs)
    response = tokenizer.decode(pred.cpu()[0], skip_special_tokens=False)
    print("Model response:", response)
    if 'image' in query[0]:
        image = tokenizer.draw_bbox_on_latest_picture(response)
        if image is not None:
            # BUG FIX: save_image() dereferences ``.name`` on a file object,
            # but draw_bbox_on_latest_picture() returns a PIL image — save it
            # directly under a fresh random filename instead.
            Path(uploaded_file_dir).mkdir(parents=True, exist_ok=True)
            image_path = str(Path(uploaded_file_dir) / f"tmp{secrets.token_hex(10)}.jpg")
            image.save(image_path)
            _chatbot[-1] = (chat_query, (image_path,))
        else:
            _chatbot[-1] = (chat_query, "No image to display.")
    else:
        _chatbot[-1] = (chat_query, response)
    return _chatbot
175
-
176
def save_uploaded_image(image_file, upload_dir):
    """Save a PIL-style image (anything with ``.save(path)``) as a JPEG.

    Args:
        image_file: image object to save, or None.
        upload_dir: base directory for the randomly-named subdirectory.

    Returns:
        The saved file path as a string, or None when no image was given.
    """
    # BUG FIX: the original referenced an undefined name ``image`` (NameError)
    # and ignored the ``upload_dir`` parameter in favor of the module global.
    if image_file is None:
        return None
    temp_dir = Path(upload_dir) / secrets.token_hex(20)
    temp_dir.mkdir(exist_ok=True, parents=True)
    filename = temp_dir / f"tmp{secrets.token_hex(5)}.jpg"
    image_file.save(str(filename))
    return str(filename)
186
-
187
def regenerate(_chatbot, task_history) -> list:
    """Re-run the model on the most recent user turn (🤔 Regenerate button).

    Clears the last response from task_history, rewinds the chatbot display,
    and re-invokes predict(). No-op when there is no completed turn.
    """
    if not task_history:
        return _chatbot
    item = task_history[-1]
    if item[1] is None:
        return _chatbot
    task_history[-1] = (item[0], None)
    chatbot_item = _chatbot.pop(-1)
    if chatbot_item[0] is None:
        _chatbot[-1] = (_chatbot[-1][0], None)
    else:
        _chatbot.append((chatbot_item[0], None))
    # BUG FIX: predict() takes only (_chatbot, task_history) — it was being
    # called with 4 arguments.
    return predict(_chatbot, task_history)
200
-
201
def add_text(history, task_history, text) -> tuple:
    """Append a typed message to both histories and clear the input box.

    A single trailing punctuation mark (PUNCTUATION) is stripped from the
    copy kept for the model; the display copy is HTML-escaped via _parse_text.
    """
    stripped = text
    has_lone_trailing_punct = (
        len(text) >= 2
        and text[-1] in PUNCTUATION
        and text[-2] not in PUNCTUATION
    )
    if has_lone_trailing_punct:
        stripped = text[:-1]
    return (
        history + [(_parse_text(text), None)],
        task_history + [(stripped, None)],
        "",
    )
208
-
209
def add_file(history, task_history, file):
    """Append an uploaded file to both histories (shadows the earlier add_file).

    Returns:
        (history, task_history), unchanged when no file was uploaded.
    """
    if file is None:
        return history, task_history  # Return if no file is uploaded
    # FIX: removed the unused local ``file_path``; use file.name directly.
    history = history + [((file.name,), None)]
    task_history = task_history + [((file.name,), None)]
    return history, task_history
216
-
217
def reset_user_input():
    """Clear the query textbox after a message is submitted."""
    cleared_textbox = gr.update(value="")
    return cleared_textbox
219
-
220
def process_response(response: str) -> str:
    """Strip <ref> markers and <box>...</box> spans from a model reply."""
    without_refs = response.replace("<ref>", "").replace(r"</ref>", "")
    without_boxes = re.sub(BOX_TAG_PATTERN, "", without_refs)
    return without_boxes
224
-
225
def process_history_for_model(task_history) -> list:
    """Convert (query, response) history pairs into the model's dict format.

    Tuple queries become {'image': path}; strings become {'text': query}.
    A missing response is normalized to "".
    """
    converted = []
    for raw_query, raw_response in task_history:
        entry = (
            {'image': raw_query[0]} if isinstance(raw_query, tuple)
            else {'text': raw_query}
        )
        converted.append((entry, raw_response or ""))
    return converted
235
-
236
def reset_state(task_history) -> list:
    """Empty the shared task history in place and reset the chatbot display."""
    del task_history[:]
    return []
239
-
240
-
241
def _launch_demo(args, model, tokenizer):
    """Build the Gradio UI and serve it (blocks until shut down).

    Args:
        args: parsed CLI options; share/inbrowser/server settings are honored.
        model, tokenizer: loaded model/tokenizer. NOTE(review): the event
            handlers below actually read the module-level globals — confirm
            these parameters are intentionally unused.
    """
    with gr.Blocks() as demo:
        gr.Markdown(description)
        with gr.Row():
            with gr.Column(scale=1):
                chatbot = gr.Chatbot(label='Qwen-VL-Chat')
            with gr.Column(scale=1):
                with gr.Row():
                    query = gr.Textbox(lines=2, label='Input', placeholder="Type your message here...")
                    submit_btn = gr.Button("🚀 Submit")
                with gr.Row():
                    file_upload = gr.UploadButton("📁 Upload Image", file_types=["image"])
                    submit_file_btn = gr.Button("Submit Image")
                    regen_btn = gr.Button("🤔️ Regenerate")
                    empty_bin = gr.Button("🧹 Clear History")
        # Per-session model-side history, parallel to the chatbot display.
        task_history = gr.State([])

        submit_btn.click(
            fn=predict,
            inputs=[chatbot, task_history],
            outputs=[chatbot]
        )

        submit_file_btn.click(
            fn=handle_image_submission,
            inputs=[chatbot, task_history, file_upload],
            outputs=[chatbot, task_history]
        )

        regen_btn.click(
            fn=regenerate,
            inputs=[chatbot, task_history],
            outputs=[chatbot]
        )

        empty_bin.click(
            fn=reset_state,
            inputs=[task_history],
            outputs=[task_history],
        )

        query.submit(
            fn=add_text,
            inputs=[chatbot, task_history, query],
            outputs=[chatbot, task_history, query]
        )

        gr.Markdown(disclaimer)

    # BUG FIX: the CLI options were parsed but never used — forward them to
    # launch(). Also removed an unused local ``uploaded_file_dir`` that
    # shadowed the module-level value.
    demo.queue().launch(
        share=args.share,
        inbrowser=args.inbrowser,
        server_port=args.server_port,
        server_name=args.server_name,
    )
295
-
296
-
297
def main():
    """CLI entry point: parse args, load the model, then serve the demo."""
    cli_args = _get_args()
    loaded_model, loaded_tokenizer = _load_model_tokenizer(cli_args)
    _launch_demo(cli_args, loaded_model, loaded_tokenizer)


if __name__ == '__main__':
    main()