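# app.py: Gradio Space for tweet sentiment classification with a multilingual BERT model.
# The two triple-quoted blocks below are earlier iterations kept for reference;
# the active implementation follows them.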
'''import gradio as gr
from transformers import TFBertForSequenceClassification, BertTokenizer
import tensorflow as tf
# Load model and tokenizer from your HF model repo
model = TFBertForSequenceClassification.from_pretrained("shrish191/sentiment-bert")
tokenizer = BertTokenizer.from_pretrained("shrish191/sentiment-bert")
def classify_sentiment(text):
    inputs = tokenizer(text, return_tensors="tf", padding=True, truncation=True)
    predictions = model(inputs).logits
    label = tf.argmax(predictions, axis=1).numpy()[0]
    labels = {0: "Negative", 1: "Neutral", 2: "Positive"}
    return labels[label]
demo = gr.Interface(fn=classify_sentiment,
                    inputs=gr.Textbox(placeholder="Enter a tweet..."),
                    outputs="text",
                    title="Tweet Sentiment Classifier",
                    description="Multilingual BERT-based Sentiment Analysis")
demo.launch()
'''
'''
import gradio as gr
from transformers import TFBertForSequenceClassification, BertTokenizer
import tensorflow as tf
# Load model and tokenizer from your HF model repo
model = TFBertForSequenceClassification.from_pretrained("shrish191/sentiment-bert")
tokenizer = BertTokenizer.from_pretrained("shrish191/sentiment-bert")
def classify_sentiment(text):
    text = text.lower().strip()  # Normalize input
    inputs = tokenizer(text, return_tensors="tf", padding=True, truncation=True)
    predictions = model(inputs).logits
    label = tf.argmax(predictions, axis=1).numpy()[0]
    labels = model.config.id2label  # Use mapping from config.json
    print(f"Text: {text} | Prediction: {label} | Logits: {predictions.numpy()}")  # Debug
    return labels[str(label)]  # Convert to string key
demo = gr.Interface(fn=classify_sentiment,
                    inputs=gr.Textbox(placeholder="Enter a tweet..."),
                    outputs="text",
                    title="Tweet Sentiment Classifier",
                    description="Multilingual BERT-based Sentiment Analysis")
demo.launch()
'''
import gradio as gr
from transformers import TFBertForSequenceClassification, AutoTokenizer
import tensorflow as tf
# Load model and tokenizer
model = TFBertForSequenceClassification.from_pretrained("shrish191/sentiment-bert")
tokenizer = AutoTokenizer.from_pretrained("shrish191/sentiment-bert")
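# Note: AutoTokenizer resolves the correct tokenizer class (and a fast variant when
# one is available) from the files in the model repo.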
def classify_sentiment(text):
    text = text.lower().strip()
    inputs = tokenizer(text, return_tensors="tf", padding=True, truncation=True)
    outputs = model(inputs, training=False)
    logits = outputs.logits
    label_id = tf.argmax(logits, axis=1).numpy()[0]
    # id2label in a transformers config uses integer keys, so look the label up with int()
    labels = model.config.id2label
    label_name = labels.get(int(label_id), "Unknown")
    print(f"Text: {text} | Label ID: {label_id} | Label: {label_name} | Logits: {logits.numpy()}")
    return label_name
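
# Minimal local sanity check (hypothetical inputs, not part of the Space UI);
# uncomment to print a few predictions before the interface starts:
# for sample in ["I love this phone!", "This is the worst day ever."]:
#     print(sample, "->", classify_sentiment(sample))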
# Gradio UI
demo = gr.Interface(
    fn=classify_sentiment,
    inputs=gr.Textbox(placeholder="Enter a tweet..."),
    outputs="text",
    title="Tweet Sentiment Classifier",
    description="Multilingual BERT-based Sentiment Analysis"
)
demo.launch()
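# On a Hugging Face Space the default launch() settings are sufficient; when running
# locally, demo.launch(share=True) can expose a temporary public URL for quick testing.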