anh1811 committed
Commit
525e1e1
1 Parent(s): 9cc39ba

Update app.py

Files changed (1)
  1. app.py +42 -41
app.py CHANGED
@@ -350,47 +350,48 @@ scaled_anchors = (
 
 uploaded_file = st.file_uploader("Chọn hình ảnh...", type=["jpg", "jpeg", "png"])
 # uploaded_file = '/home/ngocanh/Documents/final_thesis/code/dataset/10_10/base/images/test/000011.jpg'
- image = Image.open(uploaded_file)
- print("Thuc hien bien doi")
-
- #task 1
- file_path = f"2007_base_{base}_{new}_mAP_{base}_{new}.pth.tar"
- model = YOLOv3(num_classes=base).to(device)
- checkpoint = torch.load(file_path, map_location=device)
- model.load_state_dict(checkpoint["state_dict"])
- model.eval()
- image_1 = infer(model, image, 0.5, 0.5, scaled_anchors)
-
- #task 2
- file_path = f"2007_task2_{base}_{new}_mAP_{base}_{new}.pth.tar"
- model = YOLOv3(num_classes=all).to(device)
- checkpoint = torch.load(file_path, map_location=device)
- model.load_state_dict(checkpoint["state_dict"])
- model.eval()
- image_2 = infer(model, image, 0.5, 0.5, scaled_anchors)
-
- #ft
- file_path = f"2007_finetune_{base}_{new}_mAP_{base}_{new}.pth.tar"
- checkpoint = torch.load(file_path, map_location=device)
- model.load_state_dict(checkpoint["state_dict"])
- model.eval()
- image_3 = infer(model, image, 0.5, 0.5, scaled_anchors)
- # Streamlit App
- # Widget for uploading the image file
-
- # note = Image.open("note.png")
- # st.image(note, width=150)
-
-
- col1, col2, col3, col4 = st.columns(4)
- with col1:
-     st.image(image, caption="Ảnh đầu vào", use_column_width=True)
- with col2:
-     st.image(image_1, caption="Kết quả task 1", channels="BGR", use_column_width=True)
- with col3:
-     st.image(image_1, caption="Kết quả task 2 (no finetune)", channels="BGR", use_column_width=True)
- with col4:
-     st.image(image_1, caption="Kết quả task 2 (finetune)", channels="BGR", use_column_width=True)
+ if uploaded_file is not None:
+     image = Image.open(uploaded_file)
+     print("Thuc hien bien doi")
+
+     #task 1
+     file_path = f"2007_base_{base}_{new}_mAP_{base}_{new}.pth.tar"
+     model = YOLOv3(num_classes=base).to(device)
+     checkpoint = torch.load(file_path, map_location=device)
+     model.load_state_dict(checkpoint["state_dict"])
+     model.eval()
+     image_1 = infer(model, image, 0.5, 0.5, scaled_anchors)
+
+     #task 2
+     file_path = f"2007_task2_{base}_{new}_mAP_{base}_{new}.pth.tar"
+     model = YOLOv3(num_classes=all).to(device)
+     checkpoint = torch.load(file_path, map_location=device)
+     model.load_state_dict(checkpoint["state_dict"])
+     model.eval()
+     image_2 = infer(model, image, 0.5, 0.5, scaled_anchors)
+
+     #ft
+     file_path = f"2007_finetune_{base}_{new}_mAP_{base}_{new}.pth.tar"
+     checkpoint = torch.load(file_path, map_location=device)
+     model.load_state_dict(checkpoint["state_dict"])
+     model.eval()
+     image_3 = infer(model, image, 0.5, 0.5, scaled_anchors)
+     # Streamlit App
+     # Widget for uploading the image file
+
+     # note = Image.open("note.png")
+     # st.image(note, width=150)
+
+
+     col1, col2, col3, col4 = st.columns(4)
+     with col1:
+         st.image(image, caption="Ảnh đầu vào", use_column_width=True)
+     with col2:
+         st.image(image_1, caption="Kết quả task 1", channels="BGR", use_column_width=True)
+     with col3:
+         st.image(image_2, caption="Kết quả task 2 (no finetune)", channels="BGR", use_column_width=True)
+     with col4:
+         st.image(image_3, caption="Kết quả task 2 (finetune)", channels="BGR", use_column_width=True)
 
 
 # import cv2
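
The commit makes two functional changes: it wraps the whole inference block in an `if uploaded_file is not None:` guard, so the app no longer calls `Image.open(None)` before a file has been chosen, and it shows `image_2` and `image_3` in the last two columns instead of repeating `image_1`. Below is a minimal sketch of the same pattern with the repeated load-checkpoint-and-infer steps factored into a helper. It assumes the `YOLOv3`, `infer`, `scaled_anchors`, `device`, `base`, `new`, and `all` names defined earlier in app.py; the `run_checkpoint` helper is hypothetical and not part of the commit.

# Sketch only: YOLOv3, infer, scaled_anchors, device, base, new, and all are
# assumed to be defined earlier in app.py (as in the diff above);
# run_checkpoint is a hypothetical helper, not part of the commit.
import streamlit as st
import torch
from PIL import Image

def run_checkpoint(path, num_classes, image):
    # Build a model, load one checkpoint, switch to eval mode, and run the app's infer().
    model = YOLOv3(num_classes=num_classes).to(device)
    checkpoint = torch.load(path, map_location=device)
    model.load_state_dict(checkpoint["state_dict"])
    model.eval()
    return infer(model, image, 0.5, 0.5, scaled_anchors)

uploaded_file = st.file_uploader("Chọn hình ảnh...", type=["jpg", "jpeg", "png"])
if uploaded_file is not None:  # run inference only after a file has been uploaded
    image = Image.open(uploaded_file)
    image_1 = run_checkpoint(f"2007_base_{base}_{new}_mAP_{base}_{new}.pth.tar", base, image)
    image_2 = run_checkpoint(f"2007_task2_{base}_{new}_mAP_{base}_{new}.pth.tar", all, image)
    image_3 = run_checkpoint(f"2007_finetune_{base}_{new}_mAP_{base}_{new}.pth.tar", all, image)

    col1, col2, col3, col4 = st.columns(4)
    col1.image(image, caption="Input image", use_column_width=True)
    col2.image(image_1, caption="Task 1", channels="BGR", use_column_width=True)
    col3.image(image_2, caption="Task 2 (no finetune)", channels="BGR", use_column_width=True)
    col4.image(image_3, caption="Task 2 (finetune)", channels="BGR", use_column_width=True)

Passing `all` for the finetune checkpoint mirrors the original flow, where that checkpoint is loaded into the model built with `num_classes=all` for task 2.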