T.Masuda committed on
Commit
0c7d0be
·
1 Parent(s): 3ec8592

update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -5
app.py CHANGED
@@ -17,10 +17,10 @@ if torch.cuda.is_available():
17
  generator = pipeline('text-generation', model=model, tokenizer=tokenizer, device=model.device)
18
  print('{}:done.'.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
19
 
20
- def generate(input_text):
21
  output = generator(
22
  input_text,
23
- max_length=128,
24
  do_sample=True,
25
  num_return_sequences=1,
26
  pad_token_id=tokenizer.pad_token_id,
@@ -35,15 +35,16 @@ with gr.Blocks(title='text generation ja') as chatbox:
35
 
36
  chatbot = gr.Chatbot(label='generated text')
37
  msg = gr.Textbox(label='text')
 
38
  clear = gr.ClearButton([msg, chatbot])
39
 
40
- def respond(message, chat_history):
41
  if message == '':
42
  return '', chat_history
43
- bot_message = generate(message)
44
  chat_history.append((message, bot_message))
45
  return '', chat_history
46
 
47
- msg.submit(respond, [msg, chatbot], [msg, chatbot])
48
 
49
  chatbox.launch()
 
17
  generator = pipeline('text-generation', model=model, tokenizer=tokenizer, device=model.device)
18
  print('{}:done.'.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
19
 
20
+ def generate(input_text, maxlen):
21
  output = generator(
22
  input_text,
23
+ max_length=maxlen,
24
  do_sample=True,
25
  num_return_sequences=1,
26
  pad_token_id=tokenizer.pad_token_id,
 
35
 
36
  chatbot = gr.Chatbot(label='generated text')
37
  msg = gr.Textbox(label='text')
38
+ maxlen = gr.Slider(minimum=30, maximum=256, value=30, step=1, label='max length')
39
  clear = gr.ClearButton([msg, chatbot])
40
 
41
+ def respond(message, maxlen, chat_history):
42
  if message == '':
43
  return '', chat_history
44
+ bot_message = generate(message, maxlen)
45
  chat_history.append((message, bot_message))
46
  return '', chat_history
47
 
48
+ msg.submit(respond, [msg, maxlen, chatbot], [msg, chatbot])
49
 
50
  chatbox.launch()