Tonic committed
Commit 199a31d · 1 Parent(s): 5207986

Update app.py

Files changed (1)
  1. app.py +25 -24
app.py CHANGED
@@ -1,3 +1,15 @@
+description = """# 🙋🏻‍♂️Welcome to🌟Tonic's🦆Qwen-VL-Chat🤩Bot!🚀
+本WebUI基于Qwen-VL-Chat打造,实现聊天机器人功能 但我必须修复它这么多也许我也得到一些荣誉
+Qwen-VL-Chat is a multimodal input model. You can use this Space to test out the current model [qwen/Qwen-VL-Chat](https://huggingface.co/qwen/Qwen-VL-Chat) You can also use 🧑🏻‍🚀qwen/Qwen-VL-Chat🚀 by cloning this space. 🧬🔬🔍 Simply click here: <a style="display:inline-block" href="https://huggingface.co/spaces/Tonic1/VLChat?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></h3>
+Join us : 🌟TeamTonic🌟 is always making cool demos! Join our active builder's🛠️community on 👻Discord: [Discord](https://discord.gg/nXx5wbX9) On 🤗Huggingface: [TeamTonic](https://huggingface.co/TeamTonic) & [MultiTransformer](https://huggingface.co/MultiTransformer) On 🌐Github: [Polytonic](https://github.com/tonic-ai) & contribute to 🌟 [PolyGPT](https://github.com/tonic-ai/polygpt-alpha)
+"""
+disclaimer = """
+Note: This demo is governed by the original license of Qwen-VL.
+We strongly advise users not to knowingly generate or allow others to knowingly generate harmful content,
+including hate speech, violence, pornography, deception, etc.
+(注:本演示受Qwen-VL的许可协议限制。我们强烈建议,用户不应传播及不应允许他人传播以下内容,
+包括但不限于仇恨言论、暴力、色情、欺诈相关的有害信息。)"""
+
 from modelscope import AutoModelForCausalLM, AutoTokenizer, GenerationConfig, snapshot_download
 from argparse import ArgumentParser
 from pathlib import Path
@@ -9,12 +21,15 @@ import re
 import secrets
 import tempfile
 
+#GlobalVariables
 os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'
 DEFAULT_CKPT_PATH = 'qwen/Qwen-VL-Chat'
 REVISION = 'v1.0.4'
 BOX_TAG_PATTERN = r"<box>([\s\S]*?)</box>"
 PUNCTUATION = "!?。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏."
 uploaded_file_dir = os.environ.get("GRADIO_TEMP_DIR") or str(Path(tempfile.gettempdir()) / "gradio")
+tokenizer = None
+model = None
 
 def _get_args() -> ArgumentParser:
     parser = ArgumentParser()
@@ -35,7 +50,7 @@ def _get_args() -> ArgumentParser:
     args = parser.parse_args()
     return args
 
-def handle_image_submission(_chatbot, task_history, file, tokenizer, model) -> tuple:
+def handle_image_submission(_chatbot, task_history, file) -> tuple:
     print("handle_image_submission called")
     if file is None:
         print("No file uploaded")
@@ -50,6 +65,7 @@ def handle_image_submission(_chatbot, task_history, file, tokenizer, model) -> t
 
 
 def _load_model_tokenizer(args) -> tuple:
+    global tokenizer, model
     model_id = args.checkpoint_path
     model_dir = snapshot_download(model_id, revision=args.revision)
     tokenizer = AutoTokenizer.from_pretrained(
@@ -126,7 +142,7 @@ def add_file(history, task_history, file):
     return history, task_history
 
 
-def predict(_chatbot, task_history, tokenizer, model) -> list:
+def predict(_chatbot, task_history) -> list:
     print("predict called")
     if not _chatbot:
         return _chatbot
@@ -205,6 +221,7 @@ def process_response(response: str) -> str:
     response = response.replace("<ref>", "").replace(r"</ref>", "")
     response = re.sub(BOX_TAG_PATTERN, "", response)
     return response
+
 def process_history_for_model(task_history) -> list:
     processed_history = []
     for query, response in task_history:
@@ -227,13 +244,7 @@ def _launch_demo(args, model, tokenizer):
     )
 
     with gr.Blocks() as demo:
-        gr.Markdown("""# Welcome to Tonic's Qwen-VL-Chat Bot""")
-        gr.Markdown(
-            """ Qwen-VL-Chat is a multimodal input model.
-本WebUI基于Qwen-VL-Chat打造,实现聊天机器人功能 但我必须修复它这么多也许我也得到一些荣誉
-You can use this Space to test out the current model [qwen/Qwen-VL-Chat](https://huggingface.co/qwen/Qwen-VL-Chat) You can also use 🧑🏻‍🚀qwen/Qwen-VL-Chat🚀 by cloning this space. 🧬🔬🔍 Simply click here: <a style="display:inline-block" href="https://huggingface.co/spaces/Tonic1/VLChat?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></h3>
-Join us : 🌟TeamTonic🌟 is always making cool demos! Join our active builder's🛠️community on 👻Discord: [Discord](https://discord.gg/nXx5wbX9) On 🤗Huggingface: [TeamTonic](https://huggingface.co/TeamTonic) & [MultiTransformer](https://huggingface.co/MultiTransformer) On 🌐Github: [Polytonic](https://github.com/tonic-ai) & contribute to 🌟 [PolyGPT](https://github.com/tonic-ai/polygpt-alpha)
-""")
+        gr.Markdown(description)
         with gr.Row():
             with gr.Column(scale=1):
                 chatbot = gr.Chatbot(label='Qwen-VL-Chat')
@@ -251,44 +262,34 @@ Join us : 🌟TeamTonic🌟 is always making cool demos! Join our active builder
         submit_btn.click(
             fn=predict,
             inputs=[chatbot, task_history],
-            outputs=[chatbot],
-            _state=[tokenizer, model]
+            outputs=[chatbot]
         )
 
         submit_file_btn.click(
            fn=handle_image_submission,
            inputs=[chatbot, task_history, file_upload],
-           outputs=[chatbot, task_history],
-           _state=[tokenizer, model]
+           outputs=[chatbot, task_history]
        )
 
         regen_btn.click(
            fn=regenerate,
            inputs=[chatbot, task_history],
-           outputs=[chatbot],
-           _state=[tokenizer, model]
+           outputs=[chatbot]
        )
 
         empty_bin.click(
            fn=reset_state,
            inputs=[task_history],
            outputs=[task_history],
-           _state=[tokenizer, model]
        )
 
         query.submit(
            fn=add_text,
            inputs=[chatbot, task_history, query],
-           outputs=[chatbot, task_history, query],
-           _state=[tokenizer, model]
+           outputs=[chatbot, task_history, query]
        )
 
-        gr.Markdown("""
-Note: This demo is governed by the original license of Qwen-VL.
-We strongly advise users not to knowingly generate or allow others to knowingly generate harmful content,
-including hate speech, violence, pornography, deception, etc.
-(注:本演示受Qwen-VL的许可协议限制。我们强烈建议,用户不应传播及不应允许他人传播以下内容,
-包括但不限于仇恨言论、暴力、色情、欺诈相关的有害信息。)""")
+        gr.Markdown(disclaimer)
 
     demo.queue().launch()
 
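
The thread running through this diff: Gradio event bindings such as Button.click and Textbox.submit accept fn, inputs, and outputs, but they define no `_state` keyword, so the old calls passing `_state=[tokenizer, model]` would fail with an unexpected-keyword error. Rather than threading the tokenizer and model through every handler signature, the commit declares them as module-level globals, assigns them inside `_load_model_tokenizer` via `global tokenizer, model`, and lets `predict` and `handle_image_submission` read them implicitly. A minimal sketch of that pattern, assuming only Gradio's public API; the placeholder loader and component names below are illustrative stand-ins, not the app's actual code:

import gradio as gr

tokenizer = None
model = None

def load_model_tokenizer():
    # Stand-in for the app's _load_model_tokenizer: assign the module-level
    # globals once at startup (the real app loads Qwen-VL-Chat here).
    global tokenizer, model
    tokenizer = "tokenizer-placeholder"
    model = "model-placeholder"

def predict(_chatbot, task_history):
    # Handlers no longer take tokenizer/model parameters; they read the globals.
    assert tokenizer is not None and model is not None
    return _chatbot

load_model_tokenizer()

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(label='Qwen-VL-Chat')
    task_history = gr.State([])
    submit_btn = gr.Button('Submit')
    # Only fn/inputs/outputs are wired up; `_state=` is not a Gradio keyword,
    # which is why the commit removes it from every binding.
    submit_btn.click(fn=predict, inputs=[chatbot, task_history], outputs=[chatbot])

demo.queue().launch()

Since the handlers still return updated values, the outputs= lists kept in the diff remain the only path by which state flows back to the UI.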