MicGuest
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -1,14 +1,23 @@
|
|
1 |
-
from transformers import pipeline
|
2 |
import gradio as gr
|
|
|
|
|
3 |
|
|
|
|
|
|
|
|
|
4 |
|
5 |
-
|
6 |
|
7 |
-
|
8 |
-
|
9 |
-
|
|
|
|
|
10 |
|
|
|
|
|
|
|
|
|
11 |
|
12 |
-
|
13 |
-
with gr.Interface(predict, "textbox", "text") as interface:
|
14 |
-
interface.launch()
|
|
|
|
|
1 |
import gradio as gr
|
2 |
+
import torch
|
3 |
+
from transformers import BertTokenizerFast, EncoderDecoderModel
|
4 |
|
5 |
+
# Run on the GPU when one is available, otherwise fall back to CPU.
if torch.cuda.is_available():
    device = 'cuda'
else:
    device = 'cpu'

# Spanish abstractive-summarization checkpoint (shared-weight BERT
# encoder/decoder); tokenizer and model are loaded from the same name.
ckpt = 'mrm8488/bert2bert_shared-spanish-finetuned-summarization'
tokenizer = BertTokenizerFast.from_pretrained(ckpt)
model = EncoderDecoderModel.from_pretrained(ckpt).to(device)
|
9 |
|
10 |
+
def generate_summary(text):
    """Summarize Spanish *text* with the bert2bert encoder-decoder model.

    The input is padded/truncated to 512 tokens, moved to the module-level
    `device`, run through `model.generate`, and decoded back to a plain
    string with special tokens stripped.

    Args:
        text: The source text to summarize.

    Returns:
        The generated summary as a string.
    """
    inputs = tokenizer([text], padding="max_length", truncation=True,
                       max_length=512, return_tensors="pt")
    input_ids = inputs.input_ids.to(device)
    attention_mask = inputs.attention_mask.to(device)
    # Inference only: disable autograd bookkeeping so generation does not
    # build a gradient graph (saves memory and time).
    with torch.no_grad():
        output = model.generate(input_ids, attention_mask=attention_mask)
    return tokenizer.decode(output[0], skip_special_tokens=True)
|
17 |
|
18 |
+
# Wire the summarizer into a simple text-in / text-out Gradio UI.
demo = gr.Interface(
    fn=generate_summary,
    inputs=gr.Textbox(lines=10, placeholder="Insert the text here"),
    outputs=gr.Textbox(lines=4),
)

demo.launch()
|
|
|
|