# FinalProject / app.py
import gradio as gr
import numpy as np
import torch
from console_logging.console import Console
from datasets import load_dataset
from transformers import AutoModelForSequenceClassification, AutoTokenizer

console = Console()

# Twitter financial news sentiment dataset (labels: 0 = Bearish, 1 = Bullish, 2 = Neutral)
dataset = load_dataset("zeroshot/twitter-financial-news-sentiment")

# Base BERT encoder with a fresh 3-way classification head to match the dataset's classes
model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=3)
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
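
# The dataset is expected to load with 'train' and 'validation' splits and 'text'/'label'
# columns; logging the structure makes that easy to confirm in the Space logs.
console.log(str(dataset))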
# Treat every column except the raw text as a label column (here just 'label')
labels = [label for label in dataset['train'].features.keys() if label not in ['text']]
def preprocess_data(examples):
# take a batch of texts
text = examples["text"]
# encode them
encoding = tokenizer(text, padding="max_length", truncation=True, max_length=128)
# add labels
labels_batch = {k: examples[k] for k in examples.keys() if k in labels}
# create numpy array of shape (batch_size, num_labels)
labels_matrix = np.zeros((len(text), len(labels)))
# fill numpy array
for idx, label in enumerate(labels):
labels_matrix[:, idx] = labels_batch[label]
encoding["labels"] = labels_matrix.tolist()
return encoding
encoded_dataset = dataset.map(preprocess_data, batched=True, remove_columns=dataset['train'].column_names)
example = encoded_dataset['train'][0]
console.log(example.keys())
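# Sanity check: decoding the first example back to text should roughly reproduce the tweet
# (modulo lowercasing and the [CLS]/[SEP]/[PAD] special tokens added by the tokenizer).
console.log(tokenizer.decode(example['input_ids']))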
def sentiment_score(review):
    # Tokenize the review and return the index of the highest-scoring class
    tokens = tokenizer.encode(review, return_tensors='pt', truncation=True)
    with torch.no_grad():
        result = model(tokens)
    return int(torch.argmax(result.logits))
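
# Quick check of the helper. Note that the classification head on bert-base-uncased is
# randomly initialised, so predictions are not meaningful until the model is fine-tuned.
# The sample tweet below is made up purely for illustration.
console.log(sentiment_score("$AAPL shares rally after strong quarterly earnings"))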
# A Dataset column is a plain list, so pandas-style .apply is unavailable; use .map to add
# a 'sentiment' column with the predicted class for each (truncated) tweet instead.
dataset['train'] = dataset['train'].map(lambda x: {'sentiment': sentiment_score(x['text'][:512])})
"""
categories = ('Car in good condition','Damaged Car')
def is_car(x) : return x[0].isupper()
def image_classifier(img):
pred,index,probs = learn.predict(img)
return dict(zip(categories, map(float,probs)))
# image = gr.inputs.Image(shape=(192,192))
image = gr.components.Image(shape=(192,192))
label = gr.components.Label()
examples = ['./car.jpg','./crash.jpg','./carf.jpg']
intf = gr.Interface(fn= image_classifier,inputs=image,outputs=label,examples=examples)
intf.launch()"""
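
# Minimal sketch of serving the sentiment model through a Gradio text interface, analogous
# to the disabled image-classifier block above. The label names follow the dataset's
# convention (0 = Bearish, 1 = Bullish, 2 = Neutral); the interface wiring is illustrative.
sentiment_labels = ['Bearish', 'Bullish', 'Neutral']

def classify_tweet(text):
    # Truncate long inputs the same way as above and map the predicted index to a label name
    return sentiment_labels[sentiment_score(text[:512])]

demo = gr.Interface(fn=classify_tweet, inputs=gr.components.Textbox(), outputs=gr.components.Label())
demo.launch()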