NORLIE JHON MALAGDAO committed
Commit 5c44e1c · verified · 1 Parent(s): 5dc2d3b

Update app.py

Files changed (1)
  1. app.py +51 -101
app.py CHANGED
@@ -1,4 +1,3 @@
-!pip install gradio
 import gradio as gr
 import matplotlib.pyplot as plt
 import numpy as np
@@ -10,14 +9,11 @@ from tensorflow import keras
 from tensorflow.keras import layers
 from tensorflow.keras.models import Sequential
 
-
 from PIL import Image
 import gdown
 import zipfile
-
 import pathlib
 
-
 # Define the Google Drive shareable link
 gdrive_url = 'https://drive.google.com/file/d/1HjHYlQyRz5oWt8kehkt1TiOGRRlKFsv8/view?usp=drive_link'
 
@@ -57,158 +53,119 @@ for root, dirs, files in os.walk(extracted_path):
     for f in files:
         print(f"{subindent}{f}")
 
-
 # Path to the dataset directory
 data_dir = pathlib.Path('extracted_files/Pest_Dataset')
 data_dir = pathlib.Path(data_dir)
 
-
 bees = list(data_dir.glob('bees/*'))
 print(bees[0])
 PIL.Image.open(str(bees[0]))
 
-
 bees = list(data_dir.glob('bees/*'))
 print(bees[0])
 PIL.Image.open(str(bees[0]))
 
-
-
 batch_size = 32
 img_height = 180
 img_width = 180
 
-
 train_ds = tf.keras.utils.image_dataset_from_directory(
-    data_dir,
-    validation_split=0.2,
-    subset="training",
-    seed=123,
-    image_size=(img_height, img_width),
-    batch_size=batch_size)
-
+    data_dir,
+    validation_split=0.2,
+    subset="training",
+    seed=123,
+    image_size=(img_height, img_width),
+    batch_size=batch_size
+)
 
 val_ds = tf.keras.utils.image_dataset_from_directory(
-    data_dir,
-    validation_split=0.2,
-    subset="validation",
-    seed=123,
-    image_size=(img_height, img_width),
-    batch_size=batch_size)
-
+    data_dir,
+    validation_split=0.2,
+    subset="validation",
+    seed=123,
+    image_size=(img_height, img_width),
+    batch_size=batch_size
+)
 
 class_names = train_ds.class_names
 print(class_names)
 
-
 import matplotlib.pyplot as plt
 
 plt.figure(figsize=(10, 10))
 for images, labels in train_ds.take(1):
-    for i in range(9):
-        ax = plt.subplot(3, 3, i + 1)
-        plt.imshow(images[i].numpy().astype("uint8"))
-        plt.title(class_names[labels[i]])
-        plt.axis("off")
-
-
+    for i in range(9):
+        ax = plt.subplot(3, 3, i + 1)
+        plt.imshow(images[i].numpy().astype("uint8"))
+        plt.title(class_names[labels[i]])
+        plt.axis("off")
 
 for image_batch, labels_batch in train_ds:
-    print(image_batch.shape)
-    print(labels_batch.shape)
-    break
-
+    print(image_batch.shape)
+    print(labels_batch.shape)
+    break
 
 AUTOTUNE = tf.data.AUTOTUNE
 
 train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
 val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
 
-
 normalization_layer = layers.Rescaling(1./255)
 
-
-
-
-
-
 normalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
 image_batch, labels_batch = next(iter(normalized_ds))
 first_image = image_batch[0]
 # Notice the pixel values are now in `[0,1]`.
 print(np.min(first_image), np.max(first_image))
 
-
-
-
-
-
-
 num_classes = len(class_names)
 
-
-
-
 data_augmentation = keras.Sequential(
-    [
-        layers.RandomFlip("horizontal",
-                          input_shape=(img_height,
-                                       img_width,
-                                       3)),
-        layers.RandomRotation(0.1),
-        layers.RandomZoom(0.1),
-    ]
+    [
+        layers.RandomFlip("horizontal", input_shape=(img_height, img_width, 3)),
+        layers.RandomRotation(0.1),
+        layers.RandomZoom(0.1),
+    ]
 )
 
-
-
 plt.figure(figsize=(10, 10))
 for images, _ in train_ds.take(1):
-    for i in range(9):
-        augmented_images = data_augmentation(images)
-        ax = plt.subplot(3, 3, i + 1)
-        plt.imshow(augmented_images[0].numpy().astype("uint8"))
-        plt.axis("off")
-
-
-
+    for i in range(9):
+        augmented_images = data_augmentation(images)
+        ax = plt.subplot(3, 3, i + 1)
+        plt.imshow(augmented_images[0].numpy().astype("uint8"))
+        plt.axis("off")
 
 model = Sequential([
-    data_augmentation,
-    layers.Rescaling(1./255),
-    layers.Conv2D(16, 3, padding='same', activation='relu'),
-    layers.MaxPooling2D(),
-    layers.Conv2D(32, 3, padding='same', activation='relu'),
-    layers.MaxPooling2D(),
-    layers.Conv2D(64, 3, padding='same', activation='relu'),
-    layers.MaxPooling2D(),
-    layers.Dropout(0.2),
-    layers.Flatten(),
-    layers.Dense(128, activation='relu'),
-    layers.Dense(num_classes, name="outputs")
+    data_augmentation,
+    layers.Rescaling(1./255),
+    layers.Conv2D(32, 3, padding='same', activation='relu'),
+    layers.MaxPooling2D(),
+    layers.Conv2D(64, 3, padding='same', activation='relu'),
+    layers.MaxPooling2D(),
+    layers.Conv2D(128, 3, padding='same', activation='relu'),
+    layers.MaxPooling2D(),
+    layers.Conv2D(256, 3, padding='same', activation='relu'),
+    layers.MaxPooling2D(),
+    layers.Dropout(0.2),
+    layers.Flatten(),
+    layers.Dense(512, activation='relu'),
+    layers.Dense(num_classes, activation='softmax')
 ])
 
-
-
-
-model.compile(optimizer='adam',
-              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
+model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.001),
+              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
               metrics=['accuracy'])
 
-
 model.summary()
 
-
-
 epochs = 15
 history = model.fit(
-    train_ds,
-    validation_data=val_ds,
-    epochs=epochs
+    train_ds,
+    validation_data=val_ds,
+    epochs=epochs
 )
 
-
-
 def predict_image(img):
     img = np.array(img)
     img_resized = tf.image.resize(img, (180, 180))
@@ -238,10 +195,3 @@ gr.Interface(
     description="The image data set used was obtained from Kaggle and has a collection of 12 different types of agricultural pests: Ants, Bees, Beetles, Caterpillars, Earthworms, Earwigs, Grasshoppers, Moths, Slugs, Snails, Wasps, and Weevils",
     css=custom_css
 ).launch(debug=True)
-
-
-
-
-
-
-