Update app.py
app.py
CHANGED
@@ -1,153 +1,13 @@
-import tensorflow.keras as keras
-import extract_bottleneck_features
-import cv2
 import gradio as gr
-import numpy as np
-import tensorflow as tf
-
-
-def extract_InceptionV3(tensor):
-    from tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input
-    return InceptionV3(weights='imagenet', include_top=False).predict(preprocess_input(tensor))
-
-
-def extract_Resnet50(tensor):
-    from tensorflow.keras.applications.resnet50 import ResNet50, preprocess_input
-    return ResNet50(weights='imagenet', include_top=False).predict(preprocess_input(tensor))
-
-
-from tensorflow.keras.applications.resnet50 import ResNet50, preprocess_input
-
-# define the ResNet50 model used for dog detection
-ResNet50_model = ResNet50(weights='imagenet')
-
-
-def ResNet50_predict_labels(img):
-    # return the ImageNet class index predicted for the image
-    img = np.expand_dims(img, axis=0)
-    img = preprocess_input(img)
-    return np.argmax(ResNet50_model.predict(img))
-
-
-def path_to_tensor(img):
-    # Gradio already delivers a (224, 224, 3) array, so only a batch
-    # dimension is added to obtain a 4D tensor of shape (1, 224, 224, 3)
-    return np.expand_dims(img, axis=0)
-
-
-# extract the pre-trained face detector
-face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
-
-
-def face_detector(img):
-    """Return the number of human faces detected in img (0 if none)."""
-    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
-    faces = face_cascade.detectMultiScale(gray)
-    # the count is falsy when no face is found, so callers can use it as a test
-    return len(faces)
-
-
-def InceptionV3_prediction_breed(img):
-    """
-    Input: image uploaded by the user
-    Return: dog breed predicted by the model
-    """
-    # extract bottleneck features
-    bottleneck_feature = extract_InceptionV3(path_to_tensor(img))
-    # obtain the predicted vector; InceptionV3_model and dog_names are
-    # assumed to be loaded elsewhere in the Space (e.g. from saved weights)
-    predicted_vector = InceptionV3_model.predict(bottleneck_feature)
-    # return the dog breed predicted by the model
-    return dog_names[np.argmax(predicted_vector)].split('.')[-1]
-
-
-def dog_detector(img):
-    """
-    Input: image uploaded by the user
-    Return: True if a dog is detected in the image
-    """
-    prediction = ResNet50_predict_labels(img)
-    # ImageNet class indices 151-268 correspond to dog breeds
-    return (prediction >= 151) & (prediction <= 268)
-
-
-def identify_dog_app(img):
-    """
-    Predict the breed of the dog (or the breed a human most resembles).
-
-    Input: image uploaded by the user
-    Return: dog or human, and the breed for the uploaded image
-    """
-    breed = InceptionV3_prediction_breed(img)
-    if dog_detector(img):
-        return "This looks like a dog and its breed is:", "{}".format(breed)
-    elif face_detector(img):
-        return "This looks like a human but might be classified as a dog of the following breed:", "{}".format(breed)
-    else:
-        return "I have no idea what this might be. Please upload another image!", "Not applicable"
-
-
-image = gr.inputs.Image(shape=(224, 224), label="Image")
-label = gr.outputs.Label(num_top_classes=1)
-
-iface = gr.Interface(
-    fn=identify_dog_app,
-    inputs=image,
-    outputs=[gr.outputs.Label(label="Human or Dog?"), gr.outputs.Label(label="Breed:")],
-    title="Human or Dog Identification - Breed Classification",
-    # description="Please find the Jupyter notebook on ___",
-    article='<b><span style="color: #ff9900;">Acknowledgement:</span></b><br/>'
-            '<p><span style="color: #ff9900;">I would like to express my special thanks of gratitude '
-            'to Misk & Sdaia for giving me the opportunity to enrol in the "Data Scientist" Udacity nanodegree, '
-            'as well as to my mentor, Mr. Haroon, who was a great help during my learning journey.</span></p>'
-            '<p><span style="color: #ff9900;">This is my capstone project, and herewith I finish this nanodegree.</span></p>',
-    theme="dark-huggingface"
-)
-
-iface.launch(share=False)
+from gradio.mix import Series
+
+description = "Generate your own D&D story!"
+title = "French Story Generator using Opus MT and GPT-2"
+
+# load the three hosted models: French->English translation, story generation, English->French
+translator_fr = gr.Interface.load("huggingface/Helsinki-NLP/opus-mt-fr-en")
+story_gen = gr.Interface.load("huggingface/pranavpsv/gpt2-genre-story-generator")
+translator_en = gr.Interface.load("huggingface/Helsinki-NLP/opus-mt-en-fr")
+
+examples = [["L'aventurier est approché par un mystérieux étranger, pour une nouvelle quête."]]
+
+# chain the three interfaces so each output feeds the next input
+Series(translator_fr, story_gen, translator_en,
+       description=description,
+       title=title,
+       examples=examples,
+       inputs=gr.inputs.Textbox(lines=10)).launch()