Spaces (status: Sleeping)

Commit: Update Gradio Examples.py

Gradio Examples.py  +0 -200

Gradio Examples.py CHANGED
@@ -1,106 +1,3 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# #### Gradio Comparing Transfer Learning Models
-
-# In[1]:
-
-
-import tensorflow as tf
-print(tf.__version__)
-
-
-# In[2]:
-
-
-pip install gradio==1.6.0
-
-
-# In[3]:
-
-
-pip install MarkupSafe==2.1.1
-
-
-# In[1]:
-
-
-import gradio as gr
-import tensorflow as tf
-import numpy as np
-from PIL import Image
-import requests
-
-
-# Download human-readable labels for ImageNet.
-response = requests.get("https://git.io/JJkYN")
-labels = response.text.split("\n")
-
-mobile_net = tf.keras.applications.MobileNetV2()
-inception_net = tf.keras.applications.InceptionV3()
-
-
-# In[2]:
-
-
-def classify_image_with_mobile_net(im):
-    im = Image.fromarray(im.astype('uint8'), 'RGB')
-    im = im.resize((224, 224))
-    arr = np.array(im).reshape((-1, 224, 224, 3))
-    arr = tf.keras.applications.mobilenet.preprocess_input(arr)
-    prediction = mobile_net.predict(arr).flatten()
-    return {labels[i]: float(prediction[i]) for i in range(1000)}
-
-
-
-# In[3]:
-
-
-def classify_image_with_inception_net(im):
-    # Resize the image to
-    im = Image.fromarray(im.astype('uint8'), 'RGB')
-    im = im.resize((299, 299))
-    arr = np.array(im).reshape((-1, 299, 299, 3))
-    arr = tf.keras.applications.inception_v3.preprocess_input(arr)
-    prediction = inception_net.predict(arr).flatten()
-    return {labels[i]: float(prediction[i]) for i in range(1000)}
-
-
-# In[4]:
-
-
-imagein = gr.inputs.Image()
-label = gr.outputs.Label(num_top_classes=3)
-sample_images = [
-    ["monkey.jpg"],
-    ["sailboat.jpg"],
-    ["bicycle.jpg"],
-    ["download.jpg"],
-]
-
-
-# In[6]:
-
-
-gr.Interface(
-    [classify_image_with_mobile_net, classify_image_with_inception_net],
-    imagein,
-    label,
-    title="MobileNet vs. InceptionNet",
-    description="""Let's compare 2 state-of-the-art machine learning models that classify images into one of 1,000 categories: MobileNet (top),
-    a lightweight model that has an accuracy of 0.704, vs. InceptionNet
-    (bottom), a much heavier model that has an accuracy of 0.779.""",
-    examples=sample_images).launch()
-
-
-# In[6]:
-
-
-pip install transformers
-
-
-# In[6]:
-
 
 import gradio as gr
 from transformers import AutoTokenizer, AutoModelForSequenceClassification
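Review note on the removed block above: it was written for Gradio 1.6.0, where gr.inputs.Image / gr.outputs.Label existed and gr.Interface accepted a list of functions to compare; neither survives in current releases. The bare pip install lines are Jupyter cell commands and are syntax errors in a plain .py file. A minimal sketch of the same MobileNet-vs-Inception comparison against the modern component API (targeting Gradio 4.x is an assumption, not something the original states):

import gradio as gr
import numpy as np
import requests
import tensorflow as tf
from PIL import Image

# Human-readable ImageNet labels, fetched as in the removed code.
labels = requests.get("https://git.io/JJkYN").text.split("\n")

mobile_net = tf.keras.applications.MobileNetV2()
inception_net = tf.keras.applications.InceptionV3()

def _classify(im, model, size, preprocess):
    # Resize to the model's input size, preprocess, and map all 1,000
    # output probabilities to their labels.
    arr = np.array(Image.fromarray(im.astype("uint8"), "RGB").resize(size))
    arr = preprocess(arr.reshape((-1, *size, 3)).astype("float32"))
    preds = model.predict(arr).flatten()
    return {labels[i]: float(preds[i]) for i in range(1000)}

def classify_both(im):
    # One function returning both predictions replaces the old
    # list-of-functions comparison that gr.Interface no longer accepts.
    return (
        _classify(im, mobile_net, (224, 224),
                  tf.keras.applications.mobilenet_v2.preprocess_input),
        _classify(im, inception_net, (299, 299),
                  tf.keras.applications.inception_v3.preprocess_input),
    )

gr.Interface(
    fn=classify_both,
    inputs=gr.Image(),
    outputs=[gr.Label(num_top_classes=3, label="MobileNetV2"),
             gr.Label(num_top_classes=3, label="InceptionV3")],
    title="MobileNet vs. InceptionNet",
).launch()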
@@ -145,102 +42,5 @@ iface = gr.Interface(
 iface.launch()
 
 
-# In[17]:
-
-
-import gradio as gr
-from transformers import AutoTokenizer, AutoModelForSequenceClassification
-import torch
-from torchvision import transforms
-from io import BytesIO
-from PIL import Image
-
-# Define the available models and datasets
-models = {
-    "Model 1": {
-        "model_name": "bert-base-uncased",
-        "tokenizer": None,
-        "model": None
-    },
-    "Model 2": {
-        "model_name": "distilbert-base-uncased",
-        "tokenizer": None,
-        "model": None
-    },
-    # Add more models as needed
-}
-
-datasets = {
-    "Dataset 1": {
-        "name": "imdb",
-        "split": "test",
-        "features": ["text"],
-    },
-    "Dataset 2": {
-        "name": "ag_news",
-        "split": "test",
-        "features": ["text"],
-    },
-    # Add more datasets as needed
-}
-
-# Load models
-for model_key, model_info in models.items():
-    tokenizer = AutoTokenizer.from_pretrained(model_info["model_name"])
-    model = AutoModelForSequenceClassification.from_pretrained(model_info["model_name"])
-    model_info["tokenizer"] = tokenizer
-    model_info["model"] = model
-
-# Set the device to GPU if available
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-for model_info in models.values():
-    model_info["model"].to(device)
-
-# Define the preprocessing function
-def preprocess(image_file):
-    image = Image.open(BytesIO(image_file.read())).convert("RGB")
-    preprocess_transform = transforms.Compose([
-        transforms.Resize((224, 224)),
-        transforms.ToTensor(),
-        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
-    ])
-    image = preprocess_transform(image)
-    image = image.unsqueeze(0)
-    return image.to(device)
-
-# Define the prediction function
-def predict(image_file, model_key):
-    model_info = models[model_key]
-    tokenizer = model_info["tokenizer"]
-    model = model_info["model"]
-
-    image = preprocess(image_file)
-
-    with torch.no_grad():
-        outputs = model(image)
-
-    predictions = outputs.logits.argmax(dim=1)
-
-    return predictions.item()
-
-def classify_image(image, model_key):
-    image = Image.fromarray(image.astype('uint8'), 'RGB')
-    image_file = BytesIO()
-    image.save(image_file, format="JPEG")
-    prediction = predict(image_file=image_file, model_key=model_key)
-    return prediction
-
-iface = gr.Interface(fn=classify_image,
-                     inputs=["image", gr.inputs.Dropdown(list(models.keys()), label="Model")],
-                     outputs="text",
-                     title="Image Classification",
-                     description="Classify images using Hugging Face models")
-
-iface.launch()
-
-
-# In[ ]:
-
-
 
 
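Review note on the removed block above: AutoModelForSequenceClassification loads text models (BERT/DistilBERT), so model(image) would fail at inference; classify_image never rewinds image_file before predict reads it (an image_file.seek(0) is missing); and the datasets dict and tokenizer are never used. A minimal working sketch using an actual image-classification checkpoint; the choice of google/vit-base-patch16-224 and the AutoImageProcessor API (recent transformers releases) are assumptions, not from the original file:

import gradio as gr
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

# Assumed checkpoint: a ViT fine-tuned on ImageNet-1k, not the BERT models
# the removed code loaded.
checkpoint = "google/vit-base-patch16-224"
processor = AutoImageProcessor.from_pretrained(checkpoint)
model = AutoModelForImageClassification.from_pretrained(checkpoint)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device).eval()

def classify_image(image):
    # Gradio passes a numpy array; the processor handles resizing and
    # normalization, so no torchvision transform or BytesIO round-trip
    # is needed.
    pil = Image.fromarray(image.astype("uint8"), "RGB")
    inputs = processor(images=pil, return_tensors="pt").to(device)
    with torch.no_grad():
        logits = model(**inputs).logits
    # Return the human-readable class name instead of a bare index.
    return model.config.id2label[logits.argmax(dim=-1).item()]

gr.Interface(fn=classify_image,
             inputs="image",
             outputs="text",
             title="Image Classification",
             description="Classify images using a Hugging Face image model").launch()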