import gradio as gr

# These imports are only needed if the real sentiment models below are enabled.
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from transformers import pipeline


def greet(name):
    return "Hello " + name + "!!"


def classify(text):
    return {"cat": 0.3, "dog": 0.7}


def predict_sentiment(text, model="vader"):
    # Placeholder scores; uncomment the blocks below (and the Dropdown input)
    # to run the real models.
    if model == "finiteautomata/bertweet-base-sentiment-analysis":
        # pipe = pipeline("text-classification", model="finiteautomata/bertweet-base-sentiment-analysis")
        # out = pipe(text, return_all_scores=True)
        # return {pred["label"]: pred["score"] for pred in out[0]}
        return {"cathf": 0.3, "doghf": 0.7}
    elif model == "vader":
        # nltk.download('vader_lexicon')
        # sia = SentimentIntensityAnalyzer()
        # return sia.polarity_scores(text)
        return {"catv": 0.3, "dogv": 0.7}


demo = gr.Blocks()

with demo:
    gr.Markdown(
        "A bunch of different Gradio demos in tabs.\n\n"
        "Note that generally, the code that is in each tab could be its own Gradio application!"
    )
    with gr.Tabs():
        with gr.TabItem("Basic Hello"):
            gr.Markdown('The most basic "Hello World"-type demo you can write')
            interface = gr.Interface(fn=greet, inputs="text", outputs="text")
        with gr.TabItem("Label Output"):
            gr.Markdown("An example of a basic interface with a classification label as output")
            interface = gr.Interface(fn=classify, inputs="text", outputs="label")
        with gr.TabItem("Multiple Inputs"):
            gr.Markdown(
                "A more complex interface for sentiment analysis with multiple inputs, "
                "including a dropdown, and some examples"
            )
            # Use a distinct name so the outer `demo` Blocks is not overwritten
            # before demo.launch() is called.
            sentiment_interface = gr.Interface(
                fn=predict_sentiment,
                inputs=[
                    gr.Textbox(),
                    # gr.Dropdown(
                    #     ["finiteautomata/bertweet-base-sentiment-analysis", "vader"],
                    #     label="Model",
                    # ),
                ],
                outputs="label",
            )

demo.launch()