Macropodus committed
Commit 1f38345 · verified · 1 Parent(s): 20c7cb1

Update app.py

Files changed (1)
  1. app.py +65 -27
app.py CHANGED
@@ -1,15 +1,41 @@
+# !/usr/bin/python
+# -*- coding: utf-8 -*-
+# @time    : 2021/2/29 21:41
+# @author  : Mo
+# @function: text correction, using macro-correct
+
+
+import os
+os.environ["MACRO_CORRECT_FLAG_CSC_TOKEN"] = "1"
+from macro_correct import correct
 import gradio as gr
-from huggingface_hub import InferenceClient
+
+
+### default correction (list input)
+text_list = ["真麻烦你了。希望你们好好的跳无",
+             "少先队员因该为老人让坐",
+             "机七学习是人工智能领遇最能体现智能的一个分知",
+             "一只小鱼船浮在平净的河面上"
+             ]
+text_csc = correct(text_list)
+print("默认纠错(list输入):")
+for res_i in text_csc:
+    print(res_i)
+print("#" * 128)
 
 """
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
+默认纠错(list输入):
+{'index': 0, 'source': '真麻烦你了。希望你们好好的跳无', 'target': '真麻烦你了。希望你们好好地跳舞', 'errors': [['的', '地', 12, 0.6584], ['无', '舞', 14, 1.0]]}
+{'index': 1, 'source': '少先队员因该为老人让坐', 'target': '少先队员应该为老人让坐', 'errors': [['因', '应', 4, 0.995]]}
+{'index': 2, 'source': '机七学习是人工智能领遇最能体现智能的一个分知', 'target': '机器学习是人工智能领域最能体现智能的一个分支', 'errors': [['七', '器', 1, 0.9998], ['遇', '域', 10, 0.9999], ['知', '支', 21, 1.0]]}
+{'index': 3, 'source': '一只小鱼船浮在平净的河面上', 'target': '一只小鱼船浮在平静的河面上', 'errors': [['净', '静', 8, 0.9961]]}
 """
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+
 
 
 def respond(
     message,
-    history: list[tuple[str, str]],
+    history,
     system_message,
     max_tokens,
     temperature,
@@ -24,20 +50,27 @@ def respond(
             messages.append({"role": "assistant", "content": val[1]})
 
     messages.append({"role": "user", "content": message})
-
     response = ""
+    message_csc = correct([message])
+    target = message_csc[0].get("target", "")
+    errors = message_csc[0].get("errors", "")
+    response = target + " " + str(errors)
+    for resp in response:
+        yield resp
 
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
+    # response = ""
+    #
+    # for message in client.chat_completion(
+    #     messages,
+    #     max_tokens=max_tokens,
+    #     stream=True,
+    #     temperature=temperature,
+    #     top_p=top_p,
+    # ):
+    #     token = message.choices[0].delta.content
+    #
+    #     response += token
+    #     yield response
 
 
 """
@@ -46,19 +79,24 @@ For information on how to customize the ChatInterface, peruse the gradio docs: h
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
+        gr.Textbox(value="Macro-Correct", label="System message"),
+        # gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
+        # gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+        # gr.Slider(
+        #     minimum=0.1,
+        #     maximum=1.0,
+        #     value=0.95,
+        #     step=0.05,
+        #     label="Top-p (nucleus sampling)",
+        # ),
+    ],
+    title="Chinese Spelling Correction Model Macropodus/macbert4csc_v2",
+    description="Copy or input error Chinese text. Submit and the machine will correct text.",
+    article="Link to <a href='https://github.com/yongzhuo/macro-correct' style='color:blue;' target='_blank'>Github REPO: macro-correct</a>",
+
 )
 
 
 if __name__ == "__main__":
     demo.launch()
+    # demo.launch(server_name="0.0.0.0", server_port=8087, share=False, debug=True)
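
Note on the new streaming loop: `for resp in response: yield resp` yields one character at a time, and Gradio's ChatInterface treats each yielded value as the full bot message so far, so only the most recent character stays visible. Below is a minimal sketch of a cumulative variant that keeps the commit's `correct([message])` call; the character-by-character pacing is only an assumption about the intended typing effect and is not part of this commit.

import os
os.environ["MACRO_CORRECT_FLAG_CSC_TOKEN"] = "1"  # set before importing, as in the commit
from macro_correct import correct


def respond(message, history, system_message, max_tokens, temperature, top_p):
    # correct() returns one dict per input: {'index', 'source', 'target', 'errors'}
    message_csc = correct([message])
    target = message_csc[0].get("target", "")   # corrected sentence
    errors = message_csc[0].get("errors", "")   # [[wrong char, right char, position, prob], ...]
    response = target + " " + str(errors)
    for i in range(len(response)):
        yield response[: i + 1]                 # yield a growing prefix, not a lone character

Dropping this body into respond leaves the rest of the ChatInterface wiring unchanged: the extra inputs (system message and the commented-out sliders) are simply ignored, just as they are in the committed code.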