Recompense committed on
Commit 6b2d096 · 1 Parent(s): 8909a52

Add initial implementation of Food Vision app with Streamlit and TensorFlow

Files changed (2)
  1. app.py +522 -0
  2. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,522 @@
+ import streamlit as st
+ from streamlit.runtime.uploaded_file_manager import UploadedFile
+ import tensorflow as tf
+ import pandas as pd
+
+ # 🔹 Expand the Page Layout
+ st.set_page_config(layout="wide")  # Forces full-width mode
+
+ current_model = "Model Mini"
+
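+ # The 101 Food101 class labels, in the index order used by the model's predictions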
+ class_names = ['apple_pie',
+                'baby_back_ribs',
+                'baklava',
+                'beef_carpaccio',
+                'beef_tartare',
+                'beet_salad',
+                'beignets',
+                'bibimbap',
+                'bread_pudding',
+                'breakfast_burrito',
+                'bruschetta',
+                'caesar_salad',
+                'cannoli',
+                'caprese_salad',
+                'carrot_cake',
+                'ceviche',
+                'cheese_plate',
+                'cheesecake',
+                'chicken_curry',
+                'chicken_quesadilla',
+                'chicken_wings',
+                'chocolate_cake',
+                'chocolate_mousse',
+                'churros',
+                'clam_chowder',
+                'club_sandwich',
+                'crab_cakes',
+                'creme_brulee',
+                'croque_madame',
+                'cup_cakes',
+                'deviled_eggs',
+                'donuts',
+                'dumplings',
+                'edamame',
+                'eggs_benedict',
+                'escargots',
+                'falafel',
+                'filet_mignon',
+                'fish_and_chips',
+                'foie_gras',
+                'french_fries',
+                'french_onion_soup',
+                'french_toast',
+                'fried_calamari',
+                'fried_rice',
+                'frozen_yogurt',
+                'garlic_bread',
+                'gnocchi',
+                'greek_salad',
+                'grilled_cheese_sandwich',
+                'grilled_salmon',
+                'guacamole',
+                'gyoza',
+                'hamburger',
+                'hot_and_sour_soup',
+                'hot_dog',
+                'huevos_rancheros',
+                'hummus',
+                'ice_cream',
+                'lasagna',
+                'lobster_bisque',
+                'lobster_roll_sandwich',
+                'macaroni_and_cheese',
+                'macarons',
+                'miso_soup',
+                'mussels',
+                'nachos',
+                'omelette',
+                'onion_rings',
+                'oysters',
+                'pad_thai',
+                'paella',
+                'pancakes',
+                'panna_cotta',
+                'peking_duck',
+                'pho',
+                'pizza',
+                'pork_chop',
+                'poutine',
+                'prime_rib',
+                'pulled_pork_sandwich',
+                'ramen',
+                'ravioli',
+                'red_velvet_cake',
+                'risotto',
+                'samosa',
+                'sashimi',
+                'scallops',
+                'seaweed_salad',
+                'shrimp_and_grits',
+                'spaghetti_bolognese',
+                'spaghetti_carbonara',
+                'spring_rolls',
+                'steak',
+                'strawberry_shortcake',
+                'sushi',
+                'tacos',
+                'takoyaki',
+                'tiramisu',
+                'tuna_tartare',
+                'waffles']
+
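+ # Hard-coded per-class F1-scores for the 10 best and 10 worst classes, shown in the bar charts below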
+ top_ten_dict = {
+     "class_name": ["edamame", "macarons", "oysters", "pho",
+                    "mussels", "sashimi", "seaweed_salad", "dumplings", "guacamole", "onion_rings"],
+     "f1-score": [0.964427, 0.900433, 0.853119, 0.852652, 0.850622,
+                  0.844794, 0.834356, 0.833006, 0.83209, 0.831967]
+ }
+
+ last_ten_dict = {
+     "class_name": ["chocolate_mousse", "tuna_tartare",
+                    "scallops", "huevos_rancheros", "foie_gras", "steak",
+                    "bread_pudding", "ravioli", "pork_chop", "apple_pie"],
+     "f1-score": [0.413793, 0.399254, 0.383693, 0.367698,
+                  0.354497, 0.340426, 0.340045, 0.339785, 0.324826, 0.282407]
+ }
+
+ # 🔹 Custom CSS for Full Width & Centered Content
+ st.markdown(
+     """
+     <style>
+     /* Make the main container wider */
+     .main-container {
+         max-width: 95% !important;
+         margin: auto;
+     }
+
+     /* Center all content inside containers */
+     .centered {
+         display: flex;
+         flex-direction: column;
+         align-items: center;
+         justify-content: center;
+         text-align: center;
+         width: 100%;
+     }
+
+     .centeredh {
+         display: flex;
+         width: 80%;
+     }
+
+     /* Ensure file uploader is not constrained */
+     div[data-testid="stFileUploader"] {
+         width: 70% !important;
+     }
+
+     /* Center images */
+     img {
+         display: block;
+         margin-left: auto;
+         margin-right: auto;
+         width: 200px;
+         height: 200px;
+         border-radius: 20px;
+     }
+     </style>
+     """,
+     unsafe_allow_html=True
+ )
+
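+ # --- Page content: title, explanations, sample code and performance charts ---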
+ st.title("Food Vision Demo App 🍔🧠")
+ st.header(
+     "A food vision app, using a machine learning model (CNN), fine-tuned from EfficientNet.")
+
+ st.divider()
+ st.subheader("What is a CNN (Convolutional Neural Network)?")
+ st.write("A neural network is a network of nodes, consisting of input nodes, output nodes and hidden nodes.\
+     Each node lies in its respective layer, corresponding to its name. \
+     The input nodes reside in the input layer, the output nodes reside in the output layer and the hidden \
+     nodes reside in the hidden layer. The nodes pass information from the input layer to the output layer. \
+     The information consists of data (text, numbers, pictures, audio, videos) encoded as numbers \
+     that the network uses to learn. It does this through complex mathematical operations \
+     and algorithms.")
+
+ # Display image of Neural Network here in between dividers
+
+ st.write("A Convolutional Neural Network, in short, is a version \
+     of a neural network that specializes in images, video and basically anything visual.")
+
+ st.divider()
+ code = """import tensorflow as tf
+ from tensorflow.keras import mixed_precision
+
+ # Enable mixed precision
+ mixed_precision.set_global_policy("mixed_float16")
+
+ image_shape = (224, 224, 3)
+
+ # Load EfficientNet with mixed precision
+ base_model = tf.keras.applications.EfficientNetB0(include_top=False)
+ base_model.trainable = False
+
+ inputs = tf.keras.layers.Input(shape=image_shape, name="input_layer")
+
+ # Apply data augmentation
+ x = data_augmentation(inputs)
+
+ x = base_model(x, training=False)
+ x = tf.keras.layers.GlobalAveragePooling2D(name="global_average_pooling_layer")(x)
+
+ x = tf.keras.layers.Dense(len(train_data.class_names), name="dense_logits")(x)
+
+ # Ensure output layer remains in FP32
+ outputs = tf.keras.layers.Activation(activation="softmax", dtype=tf.float32, name="predictions")(x)
+
+ model = tf.keras.Model(inputs, outputs)
+
+ # Use a LossScaleOptimizer to prevent numerical issues
+ optimizer = mixed_precision.LossScaleOptimizer(tf.keras.optimizers.Adam())
+
+ model.compile(loss=tf.keras.losses.CategoricalCrossentropy(),
+               optimizer=optimizer,
+               metrics=["accuracy"])
+
+ # Train the model
+ history = model.fit(train_data, epochs=5, validation_data=test_data,
+                     validation_steps=int(0.15 * len(test_data)),
+                     callbacks=[create_tensorboard_callback("model_mini", "model"), checkpoint_callback])"""
+ st.subheader("Sample Code for the CNN Using the TensorFlow Functional API with Transfer Learning (NOT FULL CODE)")
+ st.code(code, language="python")
+ st.divider()
+
+ st.divider()
+
+ st.subheader("What is EfficientNet?")
+ st.write("EfficientNet is a family of convolutional neural networks that are designed to be more efficient and accurate. \
+     It scales up the model's width, depth, and resolution in a balanced way, which helps to achieve better performance \
+     with fewer resources. In simple terms, EfficientNet can achieve high accuracy on image classification tasks while \
+     using less computational power and memory compared to other models.")
+
+ st.divider()
+ st.subheader("What is Fine-Tuning?")
+ st.write("Fine-tuning is a process in machine learning where a pre-trained model is further trained on a new, but related, dataset. \
+     This helps the model to adapt to the new data and improve its performance on specific tasks. \
+     Essentially, it takes advantage of the knowledge the model has already gained and refines it for better accuracy.")
+
+ st.divider()
+ tune_code = """# Load feature extraction weights
+ model.load_weights(checkpoint_path)
+
+ # Unfreeze all layers in the base model
+ base_model.trainable = True
+
+ # Freeze all layers except the last 5
+ for layer in base_model.layers[:-5]:
+     layer.trainable = False
+
+ # Use a LossScaleOptimizer to prevent numerical issues
+ optimizer = mixed_precision.LossScaleOptimizer(tf.keras.optimizers.Adam())
+
+ # Recompile the model with a lower learning rate to reduce overfitting
+ model.compile(loss=tf.keras.losses.CategoricalCrossentropy(),
+               optimizer=optimizer, metrics=["accuracy"])  # Learning rate lowered by 10x
+
+ model_tuned_history = model.fit(train_data, epochs=10, initial_epoch=history.epoch[-1],
+                                 validation_data=test_data, validation_steps=int(0.15 * len(test_data)),
+                                 callbacks=[create_tensorboard_callback("model_mini", "model_tuned")])"""
+
+ st.subheader("Example of Fine-Tuning Using TensorFlow (NOT FULL CODE)")
+ st.code(tune_code, language="python")
+
+ st.divider()
+ st.subheader("Model Building Details")
+ st.write(f'The model was built using the :blue[Food101 Kaggle dataset]. \
+     The dataset consists of 101 classes of food, \
+     namely: {[food.replace("_", " ").title() for food in class_names]}')
+
+ st.divider()
+ st.write("When predicting, you have to pass an image of one of the 101 classes of food. \
+     The model has not yet been trained on foods outside these 101 classes.")
+
+ st.divider()
+ st.subheader("Top and Least Performing Classes")
+ st.write("After training, some classes evidently performed better than others. \
+     Below is the performance of the top and least performing classes based on the F1-score.")
+
+ st.divider()
+ st.subheader("F1-score")
+ st.write("The F1-score is a measure of a test's accuracy, which considers both the precision and the recall of the test to compute the score. The F1-score is the harmonic mean of precision and recall, where an F1-score reaches its best value at 1 (perfect precision and recall) and worst at 0. \
+     Precision is the number of true positive results divided by the number of all positive results, including those not correctly identified (i.e., the proportion of positive identifications that were actually correct). \
+     Recall (or sensitivity) is the number of true positive results divided by the number of positives that should have been identified (i.e., the proportion of actual positives that were correctly identified).")
+
+ st.divider()
+ st.subheader("The formula for the F1-score is")
+ st.latex(r"F_1 = \frac{2 \times \text{Precision} \times \text{Recall}}{\text{Precision} + \text{Recall}}")
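+ # Worked example: precision = 0.80, recall = 0.60 -> F1 = 2 * 0.80 * 0.60 / (0.80 + 0.60) ≈ 0.686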
+ st.divider()
+
+ # Top 10 / last 10 bar charts
+ st.subheader("Top and Least Classes")
+ with st.container():
+     st.markdown('<div class="centeredh">', unsafe_allow_html=True)  # START DIV BLOCK
+
+     top_ten = pd.DataFrame(top_ten_dict).sort_values("f1-score", ascending=False)
+     last_ten = pd.DataFrame(last_ten_dict).sort_values("f1-score", ascending=True)
+
+     col1, col2 = st.columns(2)
+
+     with col1:
+         st.write("Top 10 Classes")
+         st.bar_chart(top_ten, x="class_name", y="f1-score",
+                      horizontal=True, use_container_width=True)
+
+     with col2:
+         st.write("Last 10 Classes")
+         st.bar_chart(last_ten, x="class_name", y="f1-score",
+                      horizontal=True, use_container_width=True, color="#ff748c")
+
+     st.markdown('</div>', unsafe_allow_html=True)  # CLOSE DIV BLOCK
+
+ new_model = "Food Vision"
+ st.divider()
+ st.divider()
+ st.header(f"Try out the current models, :blue[{current_model}] and :blue[{new_model}], yourself.")
+ st.caption("_The model is periodically being improved and might change in the future._")
+
+
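+ # --- Helper functions: model loading, image preprocessing, prediction and widget toggles ---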
+ def load_model(filepath):
+     """
+     Loads a TensorFlow Keras model from a file path.
+
+     Args:
+         filepath (str): File path to the model.
+
+     Returns:
+         The loaded TensorFlow Keras model, or None if loading fails.
+     """
+     with st.spinner("Loading Model..."):
+         try:
+             loaded_model = tf.keras.models.load_model(filepath)
+         except Exception as e:
+             st.error(f"Can't load Model: {e}")
+         else:
+             if loaded_model:
+                 return loaded_model
+
+
+ def load_prep_image(image: UploadedFile, img_shape=224, scale=True):
+     """
+     Reads in an uploaded image and preprocesses it for model prediction.
+
+     Args:
+         image (UploadedFile): the uploaded target image
+         img_shape (int): size to resize the image to. Default = 224
+         scale (bool): whether to scale pixel values to [0, 1]. Default = True
+
+     Returns:
+         Image tensor of shape (1, img_shape, img_shape, 3)
+     """
+     bytes_data = image.getvalue()
+     image_tensor = tf.io.decode_image(bytes_data, channels=3)
+     image_tensor = tf.image.resize(image_tensor, [img_shape, img_shape])
+     image_tensor = tf.expand_dims(image_tensor, axis=0)  # Add the batch dimension expected by the model
+     if scale:
+         scaled_image_tensor = image_tensor / 255.  # If the model does not have built-in scaling
+         return scaled_image_tensor
+     else:
+         return image_tensor
+
+
+ def predict_using_model(image: UploadedFile, model_path: str) -> str:
+     """
+     Uses the CNN model to predict the class name of the uploaded input image.
+
+     Args:
+         image (UploadedFile): the uploaded image.
+         model_path (str): the path to the model.
+
+     Returns:
+         predicted_class (str): the name of the predicted class.
+     """
+     with st.spinner("Predicting using your image..."):
+         # Process the image
+         processed_image = load_prep_image(image, scale=False)  # EfficientNet has built-in scaling
+         model = load_model(model_path)
+         pred_prob = model.predict(processed_image)
+         predicted_class = class_names[pred_prob.argmax()]  # Get the predicted class name
+
+     return predicted_class
+
+
+ def toggle_checkbox(option: str) -> None:
+     """Toggle upload checkboxes such that only one can be selected."""
+     if option == "upload":
+         st.session_state.upload = True
+         st.session_state.camera = False
+     elif option == "camera":
+         st.session_state.upload = False
+         st.session_state.camera = True
+
+
+ def toggle_model(option: str) -> None:
+     """Toggle model checkboxes such that only one can be selected."""
+     if option == "model_mini":
+         st.session_state.model_mini = True
+         st.session_state.food_vision = False
+     elif option == "food_vision":
+         st.session_state.model_mini = False
+         st.session_state.food_vision = True
+
+
+ # 🔹 Apply the main container styling
+ st.markdown('<div class="main-container">', unsafe_allow_html=True)
+
+ # 🔹 Create a wider main container
+ with st.container():
+     # Define columns inside the main container
+     cols = st.columns([3, 1, 2, 1, 2], gap="medium")
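+     # Column roles: image input | arrow | model picker & predict button | arrow | prediction output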
+     has_predicted = False
+     has_uploaded = False
+
+     # 🖼️ Image Input Container
+     with cols[0]:
+         with st.container():
+             st.markdown('<div class="centered">', unsafe_allow_html=True)  # START DIV BLOCK
+
+             with st.spinner("Uploading image..."):
+                 try:
+                     upload = st.checkbox("Upload Image", key="upload",
+                                          on_change=toggle_checkbox, args=("upload",))
+                     camera = st.checkbox("Use your camera", key="camera",
+                                          on_change=toggle_checkbox, args=("camera",))
+                     if upload:
+                         uploaded_image = st.file_uploader(label="Upload an image (Max 200MB)",
+                                                           type=["png", "jpg", "jpeg"],
+                                                           accept_multiple_files=False, key="uploaded_image")
+
+                         has_uploaded = True  # To check if the file_uploader widget has loaded
+
+                         if "uploaded_image" not in st.session_state:
+                             st.session_state["uploaded_image"] = uploaded_image
+
+                     elif camera:
+                         uploaded_image = st.camera_input("Take a Picture",
+                                                          disabled=not camera, key="uploaded_image")
+
+                         has_uploaded = True  # To check if the camera_input widget has loaded
+
+                         if "uploaded_image" not in st.session_state:
+                             st.session_state["uploaded_image"] = uploaded_image
+
+                 except Exception as e:
+                     st.error(f"Image Upload failed: {e}")
+                 else:
+                     if has_uploaded:  # If the file_uploader/camera_input widget has loaded
+                         if uploaded_image:  # If the user has uploaded an image
+                             st.success("Image Uploaded.")
+                             st.image(st.session_state.uploaded_image,
+                                      caption="Your uploaded image", width=200)
+
+             st.markdown('</div>', unsafe_allow_html=True)  # CLOSE DIV BLOCK
+
+     # ➡️ Arrow 1 Container
+     with cols[1]:
+         with st.container():
+             st.markdown('<div class="centered">', unsafe_allow_html=True)
+             st.write("➡️")  # Example arrow, to be changed to an image
+             st.markdown('</div>', unsafe_allow_html=True)
+
+     # 🧠 Neural Network Image Container
+     with cols[2]:
+         with st.container():
+             st.markdown('<div class="centered">', unsafe_allow_html=True)
+
+             st.write("Pick a Model")
+             model_mini = st.checkbox("Model Mini", key="model_mini",
+                                      on_change=toggle_model, args=("model_mini",))
+             food_vision = st.checkbox("Food Vision", key="food_vision",
+                                       on_change=toggle_model, args=("food_vision",))
+
+             if model_mini:
+                 st.image("brain.png")
+             elif food_vision:
+                 st.image("content/creativity_15557951.png")  # To be changed
+
+             if has_uploaded:
+                 status = st.button(label="Predict Using Image", icon="⚛️", type="primary")
+                 if status and model_mini:
+                     result_class = predict_using_model(uploaded_image,
+                                                        model_path="models/model_mini_Food101.keras")
+                     has_predicted = True
+                 elif status and food_vision:
+                     result_class = predict_using_model(uploaded_image, model_path="models/FoodVision.keras")
+                     has_predicted = True
+
+             st.markdown('</div>', unsafe_allow_html=True)
+
+     # ➡️ Arrow 2 Container
+     with cols[3]:
+         with st.container():
+             st.markdown('<div class="centered">', unsafe_allow_html=True)
+             st.write("➡️")  # Example arrow, to be changed to an image
+             st.markdown('</div>', unsafe_allow_html=True)
+
+     # 🏆 Output Container
+     with cols[4]:
+         with st.container():
+             st.markdown('<div class="centered">', unsafe_allow_html=True)
+             if has_predicted:
+                 st.image(st.session_state.uploaded_image)
+                 if "_" in result_class:
+                     modified_class = result_class.replace("_", " ").title()
+                     st.write(f"This is an image of :blue[{modified_class}]")
+                 else:
+                     st.write(f"This is an image of :blue[{result_class.title()}]")
+             else:
+                 st.write("The Image and Prediction will appear here")
+             st.markdown('</div>', unsafe_allow_html=True)
+
+ # Close the widened container
+ st.markdown('</div>', unsafe_allow_html=True)
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ streamlit>=1.24.0
+ tensorflow>=2.12.0
+ pandas>=2.0.0