Spaces:
Sleeping
Sleeping
File size: 2,990 Bytes
f9706ce 6a2d159 f9278f9 431efbb f9706ce 2d7852d 431efbb 2d7852d f9706ce f9278f9 f0f7d30 f9278f9 2d7852d dc962d5 f9278f9 dc962d5 2d7852d 431efbb f9278f9 470c325 9afe62f f9278f9 9afe62f f9278f9 ff4ab6e 9afe62f f9278f9 ff4ab6e dc962d5 a2d03db fc753e0 dc962d5 a2d03db f9278f9 f9706ce |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 |
import gradio as gr
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from transformers import pipeline
from joblib import load
from transformers import AutoModelForSequenceClassification, AutoTokenizer
import torch.nn.functional as F
# Global model handles, loaded once at import time so every request reuses them.
# NOTE(review): these load project-local artifacts ("lr_model.joblib",
# "vectorizer.joblib", "./imdb-bert") — the app fails at startup if any is missing.
lr_model = load("lr_model.joblib")            # scikit-learn LogisticRegression (pickled via joblib)
lr_vectorizer = load("vectorizer.joblib")     # text vectorizer paired with lr_model — must match its training
sentiment_pipe = pipeline("text-classification", model="finiteautomata/bertweet-base-sentiment-analysis")
bert_model = AutoModelForSequenceClassification.from_pretrained("./imdb-bert")  # fine-tuned BERT, presumably on IMDB — confirm
bert_tokenizer = AutoTokenizer.from_pretrained("./imdb-bert")                   # tokenizer saved alongside bert_model
def greet(name):
    """Return a friendly greeting for *name*."""
    return f"Hello {name}!!"
def classify(text):
    """Dummy classifier for the demo tab: ignores *text* and returns
    fixed cat/dog confidence scores."""
    scores = {"cat": 0.3, "dog": 0.7}
    return scores
def predict_sentiment(text, model):
    """Score the sentiment of *text* with the backend selected by *model*.

    Args:
        text: Input string to analyze.
        model: One of the dropdown choices
            ("finiteautomata/bertweet-base-sentiment-analysis", "vader",
            "custom logistic regression", "custom BERT").

    Returns:
        A dict mapping sentiment labels to scores, or None when *model*
        is not a recognized choice.
    """
    if model == "finiteautomata/bertweet-base-sentiment-analysis":
        out = sentiment_pipe(text, return_all_scores=True)
        # Pipeline returns a list per input; flatten the single input's scores.
        return {pred["label"]: pred["score"] for pred in out[0]}
    elif model == "vader":
        # No-op after the first call, but still adds per-request latency;
        # consider hoisting the download to module level.
        nltk.download('vader_lexicon')
        sia = SentimentIntensityAnalyzer()
        return sia.polarity_scores(text)
    elif model == "custom logistic regression":
        x = lr_vectorizer.transform([text])
        pred = lr_model.predict_proba(x)[0]
        # predict_proba column order assumed [negative, positive] — matches
        # scikit-learn's sorted class labels for a 0/1 target.
        return {"neg": pred[0], "pos": pred[1]}
    elif model == "custom BERT":
        # BUG FIX: previously scored the hard-coded string "I love you"
        # instead of the user's input text.
        inputs = bert_tokenizer(text, return_tensors="pt")
        pred = F.softmax(bert_model(**inputs).logits[0], dim=0).tolist()
        return {"neg": pred[0], "pos": pred[1]}
    # Unknown model name: make the implicit None explicit.
    return None
# Assemble the tabbed demo app: each tab hosts an independent gr.Interface.
demo = gr.Blocks()
with demo:
    gr.Markdown("A bunch of different Gradio demos in tabs.\n\nNote that generally, the code that is in each tab could be its own Gradio application!")
    with gr.Tabs():
        with gr.TabItem("Basic Hello"):
            gr.Markdown('The most basic "Hello World"-type demo you can write')
            # Plain text-in / text-out interface around greet().
            interface = gr.Interface(fn=greet, inputs="text", outputs="text")
        with gr.TabItem("Label Output"):
            gr.Markdown("An example of a basic interface with a classification label as output")
            # "label" output renders the dict of class scores as a label widget.
            interface = gr.Interface(fn=classify, inputs="text", outputs="label")
        with gr.TabItem("Multiple Inputs"):
            gr.Markdown("A more complex interface for sentiment analysis with multiple inputs, including a dropdown, and some examples")
            interface = gr.Interface(
                predict_sentiment,
                [
                    gr.Textbox(placeholder="Your text input"),
                    # NOTE(review): only two of predict_sentiment's four
                    # supported backends are offered here — presumably the
                    # custom models are excluded on purpose; confirm.
                    gr.Dropdown(
                        ["finiteautomata/bertweet-base-sentiment-analysis", "vader"], label="Model"
                    ),
                ],
                "label",
                # Clickable example rows: (text, model) pairs.
                examples=[
                    ["Happy smile", "vader"],
                    ["Happy smile", "finiteautomata/bertweet-base-sentiment-analysis"],
                    ["Sad frown", "vader"],
                    ["Sad frown", "finiteautomata/bertweet-base-sentiment-analysis"],
                ]
            )
demo.launch()
|