#!/usr/bin/env python
# coding: utf-8
# #### Gradio Comparing Transfer Learning Models
# In[1]:
import tensorflow as tf
print(tf.__version__)
# In[2]:
get_ipython().system('pip install gradio==1.6.0')
# In[3]:
get_ipython().system('pip install MarkupSafe==2.1.1')
# In[1]:
import gradio as gr
import tensorflow as tf
import numpy as np
from PIL import Image
import requests
# Download human-readable labels for ImageNet.
response = requests.get("https://git.io/JJkYN")
labels = response.text.split("\n")
mobile_net = tf.keras.applications.MobileNetV2()
inception_net = tf.keras.applications.InceptionV3()
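# In[ ]:
# Quick sanity check (a sketch; assumes the download above succeeded). The list
# should hold about 1,000 ImageNet class names, possibly plus an empty trailing entry:
print(len(labels), labels[:3])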
# In[2]:
def classify_image_with_mobile_net(im):
    # Convert the raw array to a 224x224 RGB image (MobileNetV2's input size)
    im = Image.fromarray(im.astype('uint8'), 'RGB')
    im = im.resize((224, 224))
    arr = np.array(im).reshape((-1, 224, 224, 3))
    arr = tf.keras.applications.mobilenet_v2.preprocess_input(arr)
    prediction = mobile_net.predict(arr).flatten()
    return {labels[i]: float(prediction[i]) for i in range(1000)}
# In[3]:
def classify_image_with_inception_net(im):
    # Resize the image to InceptionV3's expected 299x299 input
    im = Image.fromarray(im.astype('uint8'), 'RGB')
    im = im.resize((299, 299))
    arr = np.array(im).reshape((-1, 299, 299, 3))
    arr = tf.keras.applications.inception_v3.preprocess_input(arr)
    prediction = inception_net.predict(arr).flatten()
    return {labels[i]: float(prediction[i]) for i in range(1000)}
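# In[ ]:
# Minimal side-by-side check of the two classifiers (a sketch; assumes one of
# the sample images listed below, e.g. "monkey.jpg", is in the working directory):
test_im = np.array(Image.open("monkey.jpg").convert("RGB"))
for fn in (classify_image_with_mobile_net, classify_image_with_inception_net):
    scores = fn(test_im)
    best = max(scores, key=scores.get)
    print(fn.__name__, "->", best, round(scores[best], 3))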
# In[4]:
imagein = gr.inputs.Image()
label = gr.outputs.Label(num_top_classes=3)
sample_images = [
    ["monkey.jpg"],
    ["sailboat.jpg"],
    ["bicycle.jpg"],
    ["download.jpg"],
]
# In[6]:
gr.Interface(
    [classify_image_with_mobile_net, classify_image_with_inception_net],
    imagein,
    label,
    title="MobileNet vs. InceptionNet",
    description="""Let's compare 2 state-of-the-art machine learning models that classify images into one of 1,000 categories: MobileNet (top),
    a lightweight model that has an accuracy of 0.704, vs. InceptionNet
    (bottom), a much heavier model that has an accuracy of 0.779.""",
    examples=sample_images).launch()
# In[6]:
get_ipython().system('pip install transformers')
# In[6]:
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification
# Load the models and tokenizers
tokenizer1 = AutoTokenizer.from_pretrained("textattack/bert-base-uncased-imdb")
tokenizer2 = AutoTokenizer.from_pretrained("nlptown/bert-base-multilingual-uncased-sentiment")
model1 = AutoModelForSequenceClassification.from_pretrained("textattack/bert-base-uncased-imdb")
model2 = AutoModelForSequenceClassification.from_pretrained("nlptown/bert-base-multilingual-uncased-sentiment")
# Define the sentiment prediction function
def predict_sentiment(text):
    # Predict sentiment using model 1 (binary IMDB model: 0 = negative, 1 = positive)
    inputs1 = tokenizer1.encode_plus(text, padding="longest", truncation=True, return_tensors="pt")
    outputs1 = model1(**inputs1)
    predicted_label1 = outputs1.logits.argmax().item()
    sentiment1 = "Positive" if predicted_label1 == 1 else "Negative"
    # Predict sentiment using model 2 (5-star review model: labels 0-4 map to 1-5 stars)
    inputs2 = tokenizer2.encode_plus(text, padding="longest", truncation=True, return_tensors="pt")
    outputs2 = model2(**inputs2)
    stars = outputs2.logits.argmax().item() + 1
    sentiment2 = "Positive" if stars >= 4 else "Negative" if stars <= 2 else "Neutral"
    return sentiment1, sentiment2
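# In[ ]:
# Spot-check the comparison function (a sketch; exact outputs depend on the
# downloaded checkpoints):
print(predict_sentiment("A wonderful film, I loved every minute."))
print(predict_sentiment("Terrible plot and wooden acting."))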
# Create the Gradio interface
iface = gr.Interface(
    fn=predict_sentiment,
    inputs="text",
    outputs=["text", "text"],
    title="Sentiment Analysis (Model 1 vs Model 2)",
    description="Compare sentiment predictions from two models.",
)
# Launch the interface
iface.launch()
# In[17]:
import gradio as gr
from transformers import AutoImageProcessor, AutoModelForImageClassification
import torch
from PIL import Image
# Define the available models and datasets
# (image-classification checkpoints from the Hugging Face Hub)
models = {
    "Model 1": {
        "model_name": "google/vit-base-patch16-224",
        "processor": None,
        "model": None
    },
    "Model 2": {
        "model_name": "microsoft/resnet-50",
        "processor": None,
        "model": None
    },
    # Add more models as needed
}
datasets = {
    "Dataset 1": {
        "name": "imdb",
        "split": "test",
        "features": ["text"],
    },
    "Dataset 2": {
        "name": "ag_news",
        "split": "test",
        "features": ["text"],
    },
    # Add more datasets as needed
}
# Load each model and its matching image processor
for model_key, model_info in models.items():
    model_info["processor"] = AutoImageProcessor.from_pretrained(model_info["model_name"])
    model_info["model"] = AutoModelForImageClassification.from_pretrained(model_info["model_name"])
# Set the device to GPU if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
for model_info in models.values():
    model_info["model"].to(device)
# Define the preprocessing function: each checkpoint's processor applies the
# resizing and normalization the model was trained with
def preprocess(image, processor):
    inputs = processor(images=image, return_tensors="pt")
    return inputs["pixel_values"].to(device)
# Define the prediction function
def predict(image, model_key):
    model_info = models[model_key]
    pixel_values = preprocess(image, model_info["processor"])
    with torch.no_grad():
        outputs = model_info["model"](pixel_values)
    predicted_id = outputs.logits.argmax(dim=1).item()
    # Return the human-readable class name rather than the raw index
    return model_info["model"].config.id2label[predicted_id]
def classify_image(image, model_key):
    # Gradio hands the image over as a numpy array; convert it to PIL
    image = Image.fromarray(image.astype('uint8'), 'RGB')
    return predict(image, model_key)
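# In[ ]:
# Smoke test with a random RGB array (a sketch; no meaningful class is expected):
import numpy as np
dummy = np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8)
print(classify_image(dummy, "Model 1"))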
iface = gr.Interface(fn=classify_image,
                     inputs=["image", gr.inputs.Dropdown(list(models.keys()), label="Model")],
                     outputs="text",
                     title="Image Classification",
                     description="Classify images using Hugging Face models")
iface.launch()
# In[ ]: