Update app.py
Browse files
app.py
CHANGED
@@ -350,47 +350,48 @@ scaled_anchors = (
|
|
350 |
|
351 |
uploaded_file = st.file_uploader("Chọn hình ảnh...", type=["jpg", "jpeg", "png"])
|
352 |
# uploaded_file = '/home/ngocanh/Documents/final_thesis/code/dataset/10_10/base/images/test/000011.jpg'
|
353 |
-
|
354 |
-
|
355 |
-
|
356 |
-
|
357 |
-
|
358 |
-
|
359 |
-
|
360 |
-
|
361 |
-
model.
|
362 |
-
|
363 |
-
|
364 |
-
|
365 |
-
|
366 |
-
|
367 |
-
|
368 |
-
|
369 |
-
model.
|
370 |
-
|
371 |
-
|
372 |
-
|
373 |
-
|
374 |
-
|
375 |
-
|
376 |
-
model.
|
377 |
-
|
378 |
-
|
379 |
-
#
|
380 |
-
|
381 |
-
|
382 |
-
#
|
383 |
-
|
384 |
-
|
385 |
-
|
386 |
-
|
387 |
-
|
388 |
-
|
389 |
-
|
390 |
-
|
391 |
-
|
392 |
-
|
393 |
-
|
|
|
394 |
|
395 |
|
396 |
# import cv2
|
|
|
# Image-upload widget: accepts a single JPG/JPEG/PNG file.
uploaded_file = st.file_uploader("Chọn hình ảnh...", type=["jpg", "jpeg", "png"])
# uploaded_file = '/home/ngocanh/Documents/final_thesis/code/dataset/10_10/base/images/test/000011.jpg'

if uploaded_file is not None:
    # BUGFIX: the original line ended in stray keyboard garbage
    # ("...is not None:ssss...") which raised a NameError on every upload.
    image = Image.open(uploaded_file)
    print("Thuc hien bien doi")

    # --- Task 1: base model, trained on the first `base` classes. ---
    file_path = f"2007_base_{base}_{new}_mAP_{base}_{new}.pth.tar"
    model = YOLOv3(num_classes=base).to(device)
    # NOTE(review): torch.load unpickles arbitrary objects — only load
    # checkpoints from a trusted source.
    checkpoint = torch.load(file_path, map_location=device)
    model.load_state_dict(checkpoint["state_dict"])
    model.eval()
    image_1 = infer(model, image, 0.5, 0.5, scaled_anchors)

    # --- Task 2 (no finetune): model over all classes. ---
    # NOTE(review): `all` here shadows the builtin — presumably a module-level
    # total-class count defined elsewhere in this file; verify.
    file_path = f"2007_task2_{base}_{new}_mAP_{base}_{new}.pth.tar"
    model = YOLOv3(num_classes=all).to(device)
    checkpoint = torch.load(file_path, map_location=device)
    model.load_state_dict(checkpoint["state_dict"])
    model.eval()
    image_2 = infer(model, image, 0.5, 0.5, scaled_anchors)

    # --- Task 2 (finetuned): same architecture as task 2, finetuned weights
    # loaded into the model instance created above. ---
    file_path = f"2007_finetune_{base}_{new}_mAP_{base}_{new}.pth.tar"
    checkpoint = torch.load(file_path, map_location=device)
    model.load_state_dict(checkpoint["state_dict"])
    model.eval()
    image_3 = infer(model, image, 0.5, 0.5, scaled_anchors)

    # Streamlit App
    # (image-upload widget is created above, outside this branch)

    # note = Image.open("note.png")
    # st.image(note, width=150)

    # Show the input image and the three model outputs side by side.
    col1, col2, col3, col4 = st.columns(4)
    with col1:
        st.image(image, caption="Ảnh đầu vào", use_column_width=True)
    with col2:
        st.image(image_1, caption="Kết quả task 1", channels="BGR", use_column_width=True)
    with col3:
        st.image(image_2, caption="Kết quả task 2 (no finetune)", channels="BGR", use_column_width=True)
    with col4:
        st.image(image_3, caption="Kết quả task 2 (finetune)", channels="BGR", use_column_width=True)
|
395 |
|
396 |
|
397 |
# import cv2
|