from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
import gradio as gr
import pickle
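
# Inference only: disable gradient tracking globally.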
torch.autograd.set_grad_enabled(False)
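
# Classical baseline: a pickled scikit-learn pipeline (reported as "TF-IDF SVC" in the output below).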
sklearn_model = pickle.load(open('classic_pipeline.pickle', 'rb'))
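
# ELECTRA-large fine-tuned on IMDB; reviews longer than 512 tokens are handled by head/tail splicing.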
model_name = "AbstractQbit/electra_large_imdb_htsplice"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)


def tokenize_with_splicing(text):
    """Tokenize a review; if it exceeds 512 tokens, splice its head and tail together."""
    tokens = tokenizer(text, truncation=False)
    if len(tokens['input_ids']) > 512:
        # Keep the first 129 tokens ([CLS] plus the opening) and the last 382
        # (the ending plus [SEP]), joined by an extra [SEP] (id 102): 512 tokens total.
        tokens['input_ids'] = tokens['input_ids'][:129] + \
            [102] + tokens['input_ids'][-382:]
        tokens['token_type_ids'] = [0] * 512
        tokens['attention_mask'] = [1] * 512
    return tokens


def make_stars(prob):
    # Map a probability in [0, 1] to a 1-10 star rating rendered with ★/☆.
    stars = round(1 + prob * 9)
    return '★' * stars + '☆' * (10 - stars)


def run_models(review):
    # Classic baseline: probability of the positive class from the sklearn pipeline.
    prob_sklearn = float(sklearn_model.predict_proba([review])[0][1])
    label_sklearn = 'positive' if prob_sklearn > 0.5 else 'negative'
    res = f"TF-IDF SVC thinks the review is {label_sklearn} ({100*prob_sklearn:.2f}% positive).\n{make_stars(prob_sklearn)}\n\n"

    # ELECTRA: splice long reviews, convert to batched PyTorch tensors, and softmax the logits.
    encoded = tokenize_with_splicing(review).convert_to_tensors('pt', prepend_batch_axis=True)
    output = torch.nn.functional.softmax(model(**encoded).logits, dim=1)
    prob_electra = float(output[0][1])
    label_electra = 'positive' if prob_electra > 0.5 else 'negative'
    res += f"ELECTRA thinks the review is {label_electra} ({100*prob_electra:.2f}% positive).\n{make_stars(prob_electra)}"

    return res


demo = gr.Interface(
    fn=run_models,
    inputs="text",
    outputs="text",
    title="Movie review classification",
    allow_flagging='never'
)
demo.launch()