MicGuest committed on
Commit
64040db
·
unverified ·
1 Parent(s): 49d0e55

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -8
app.py CHANGED
from transformers import pipeline

import gradio as gr


# Load the T5 summarization pipeline once at import time (TensorFlow backend).
model = pipeline("summarization", model="t5-base", tokenizer="t5-base", framework="tf")


def predict(prompt):
    """Summarize *prompt* with the T5 pipeline and return the summary text.

    Args:
        prompt: The input text to summarize.

    Returns:
        The generated summary as a plain string.
    """
    # BUG FIX: the original passed the undefined name `context` instead of
    # the `prompt` parameter, which raised NameError on every call.
    summary = model(prompt, max_length=130, min_length=60)
    # The pipeline returns a list like [{"summary_text": "..."}]; unwrap it
    # so the Gradio "text" output receives a string rather than a list repr.
    return summary[0]["summary_text"]


# create an interface for the model
# BUG FIX: gr.Interface is not meant to be used as a `with` context manager
# for launching; construct it and call launch() directly.
interface = gr.Interface(predict, "textbox", "text")
interface.launch()
 
 
import gradio as gr
import torch
from transformers import BertTokenizerFast, EncoderDecoderModel

# Pick the compute device: GPU when CUDA is available, otherwise CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Hugging Face Hub checkpoint: a shared BERT2BERT encoder-decoder
# fine-tuned for Spanish summarization.
ckpt = 'mrm8488/bert2bert_shared-spanish-finetuned-summarization'
# Load tokenizer and model once at import time; move the model to `device`.
tokenizer = BertTokenizerFast.from_pretrained(ckpt)
model = EncoderDecoderModel.from_pretrained(ckpt).to(device)
9
 
def generate_summary(text):
    """Summarize *text* with the module-level BERT2BERT model.

    The input is tokenized (padded/truncated to 512 tokens), run through
    ``model.generate`` on the module-level ``device``, and the first
    generated sequence is decoded back to a plain string.
    """
    encoded = tokenizer(
        [text],
        padding="max_length",
        truncation=True,
        max_length=512,
        return_tensors="pt",
    )
    ids = encoded.input_ids.to(device)
    mask = encoded.attention_mask.to(device)
    generated = model.generate(ids, attention_mask=mask)
    return tokenizer.decode(generated[0], skip_special_tokens=True)
17
 
# Wire the summarizer into a simple Gradio UI: a 10-line input textbox
# feeding generate_summary, with the summary shown in a 4-line textbox.
demo = gr.Interface(fn=generate_summary,
                    inputs=gr.Textbox(lines=10, placeholder="Insert the text here"),
                    outputs=gr.Textbox(lines=4)
                    )

# Start the Gradio server (blocking call).
demo.launch()