ColeGuion committed
Commit 86bb08d · verified · 1 Parent(s): 1b691d8

Update app.py

Files changed (1)
  1. app.py  +10 -19
app.py CHANGED
@@ -11,7 +11,7 @@ model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 
 def correct_text(text, max_length, max_new_tokens, min_length, num_beams, temperature, top_p):
-    inputs = tokenizer.encode(text, return_tensors="pt")
+    inputs = tokenizer.encode("grammar: " + text, return_tensors="pt")
 
     if max_new_tokens > 0:
         outputs = model.generate(
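Review note on this hunk: the only change is the "grammar: " task prefix prepended to the user text before encoding, in the style of T5 grammar-correction checkpoints. A minimal sketch of the resulting round trip; the checkpoint name below is a placeholder assumption, since model_name is defined earlier in app.py and does not appear in this diff:

# Sketch only: replace the placeholder with the model_name used at the top of app.py.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_name = "your-seq2seq-grammar-checkpoint"  # placeholder assumption, not the Space's real model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

text = "we shood buy an car."
inputs = tokenizer.encode("grammar: " + text, return_tensors="pt")  # prefix added by this commit
outputs = model.generate(inputs, max_length=100, num_beams=5)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))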
@@ -38,17 +38,7 @@ def correct_text(text, max_length, max_new_tokens, min_length, num_beams, temperature, top_p):
     return corrected_text
 
 
-def respond(message, history: list[tuple[str, str]], system_message, max_length, min_length, max_new_tokens, num_beams, temperature, top_p):
-    #messages = [{"role": "system", "content": system_message}]
-
-    #for val in history:
-    #    if val[0]:
-    #        messages.append({"role": "user", "content": val[0]})
-    #    if val[1]:
-    #        messages.append({"role": "assistant", "content": val[1]})
-
-    #messages.append({"role": "user", "content": message})
-
+def respond(message, history, max_length, min_length, max_new_tokens, num_beams, temperature, top_p):
     response = correct_text(message, max_length, max_new_tokens, min_length, num_beams, temperature, top_p)
     yield response
 
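Review note: with the commented-out history/system-message plumbing deleted, respond now takes exactly what gr.ChatInterface passes it, namely the message, the chat history, and then each additional_inputs value in order; the history is simply ignored and every turn is corrected independently. A hedged usage sketch, assuming the definitions above are loaded from app.py and using the slider defaults configured further down:

# respond is a generator; ChatInterface consumes its single yield per turn.
reply = next(respond(
    "she is more taller",
    history=[],
    max_length=100, min_length=0, max_new_tokens=0,
    num_beams=5, temperature=0.7, top_p=0.95,
))
print(reply)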
@@ -57,14 +47,15 @@ For information on how to customize the ChatInterface, peruse the gradio docs: h
 """
 demo = gr.ChatInterface(
     respond,
+    examples=[{"text": "we shood buy an car."}, {"text": "she is more taller"}, {"text": "merhaba"}],
     additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=100, step=1, label="Max Length"),
-        gr.Slider(minimum=1, maximum=2048, value=0, step=1, label="Min Length"),
-        gr.Slider(minimum=1, maximum=2048, value=0, step=1, label="Max New Tokens (optional)"),
-        gr.Slider(minimum=1, maximum=10, value=5, step=1, label="Num Beams"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
+        #gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
+        gr.Slider(minimum=1, maximum=256, value=100, step=1, label="Max Length"),
+        gr.Slider(minimum=1, maximum=256, value=0, step=1, label="Min Length"),
+        gr.Slider(minimum=0, maximum=256, value=0, step=1, label="Max New Tokens (optional)"),
+        gr.Slider(minimum=1, maximum=10, value=5, step=1, label="Num Beams"),
+        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
     ],
 )
 
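Review note on the interface changes: the System message textbox is commented out to match the new respond signature, three chat examples are added in the {"text": ...} dict form, and the length sliders drop from a 2048 cap to 256, presumably a better fit for sentence-level correction. For completeness, a hedged sketch of the usual launch pattern; the launch call itself sits outside this diff:

# Standard Gradio launch; assumes demo is the gr.ChatInterface built above in app.py.
if __name__ == "__main__":
    demo.launch()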