aaabiao committed on
Commit
4e73b84
·
verified ·
1 Parent(s): f7ae4a1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -3
app.py CHANGED
@@ -25,7 +25,6 @@ def generate(
25
  temperature: float = 0.7,
26
  top_p: float = 1.0,
27
  repetition_penalty: float = 1.1,
28
- input_button: bool = False
29
  ) -> Iterator[str]:
30
  conversation = []
31
  if system_prompt:
@@ -37,7 +36,7 @@ def generate(
37
  input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
38
  if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
39
  input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
40
- gr.Warning(f"Trimmed input from conversation as it was longer than {str(MAX_INPUT_TOKEN_LENGTH)} tokens.")
41
  input_ids = input_ids.to(model.device)
42
 
43
  streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
@@ -92,7 +91,7 @@ chat_interface = gr.Interface(
92
  step=0.05,
93
  value=1.1,
94
  ),
95
- gr.Button("Generate Response")
96
  ],
97
  outputs=gr.Textbox(label="Chat Output", lines=10),
98
  title="🦣MAmmoTH2",
 
25
  temperature: float = 0.7,
26
  top_p: float = 1.0,
27
  repetition_penalty: float = 1.1,
 
28
  ) -> Iterator[str]:
29
  conversation = []
30
  if system_prompt:
 
36
  input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
37
  if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
38
  input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
39
+ gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
40
  input_ids = input_ids.to(model.device)
41
 
42
  streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
 
91
  step=0.05,
92
  value=1.1,
93
  ),
94
+ "generate" # This is a placeholder for the button
95
  ],
96
  outputs=gr.Textbox(label="Chat Output", lines=10),
97
  title="🦣MAmmoTH2",