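"""Gradio demo for YOLOv7-based logo detection.

The full detection pipeline below is commented out; as deployed, the app
simply echoes the uploaded image back. Re-enabling it assumes the
WongKinYiu/yolov7 repo has been cloned onto sys.path (see the commented
setup block below) and a `logo_detection.pt` checkpoint is available.
"""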
# import os
# import sys
# import argparse
# import time
# from pathlib import Path
# import pandas as pd

import gradio as gr
# import cv2
from PIL import Image
# import torch
# import torch.backends.cudnn as cudnn
# from numpy import random
import numpy as np

# BASE_DIR = "/home/user/app"
# os.chdir(BASE_DIR)
# os.makedirs(f"{BASE_DIR}/input",exist_ok=True)
# os.system(f"git clone https://github.com/WongKinYiu/yolov7.git {BASE_DIR}/yolov7")
# sys.path.append(f'{BASE_DIR}/yolov7')

# def detect(opt, save_img=False):    
#     from models.experimental import attempt_load
#     from utils.datasets import LoadStreams, LoadImages
#     from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \
#         scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path
#     from utils.plots import plot_one_box
#     from utils.torch_utils import select_device, load_classifier, time_synchronized, TracedModel
    
#     bbox = {}
#     source, weights, view_img, save_txt, imgsz, trace = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size, not opt.no_trace
#     save_img = not opt.nosave and not source.endswith('.txt')  # save inference images
#     webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
#         ('rtsp://', 'rtmp://', 'http://', 'https://'))

#     # Directories
#     save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))  # increment run
#     (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

#     # Initialize
#     set_logging()
#     device = select_device(opt.device)
#     half = device.type != 'cpu'  # half precision only supported on CUDA

#     # Load model
#     model = attempt_load(weights, map_location=device)  # load FP32 model
#     stride = int(model.stride.max())  # model stride
#     imgsz = check_img_size(imgsz, s=stride)  # check img_size

#     if trace:
#         model = TracedModel(model, device, opt.img_size)

#     if half:
#         model.half()  # to FP16

#     # Second-stage classifier
#     classify = False
#     if classify:
#         modelc = load_classifier(name='resnet101', n=2)  # initialize
#         # load_state_dict returns a NamedTuple, not the module, so the
#         # original chained .to(device).eval() would raise AttributeError
#         modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model'])
#         modelc.to(device).eval()

#     # Set Dataloader
#     vid_path, vid_writer = None, None
#     if webcam:
#         view_img = check_imshow()
#         cudnn.benchmark = True  # set True to speed up constant image size inference
#         dataset = LoadStreams(source, img_size=imgsz, stride=stride)
#     else:
#         dataset = LoadImages(source, img_size=imgsz, stride=stride)

#     # Get names and colors
#     names = model.module.names if hasattr(model, 'module') else model.names
#     colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]

#     # Run inference
#     if device.type != 'cpu':
#         model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))  # run once
#     old_img_w = old_img_h = imgsz
#     old_img_b = 1

#     t0 = time.time()
#     for path, img, im0s, vid_cap in dataset:
#         img = torch.from_numpy(img).to(device)
#         img = img.half() if half else img.float()  # uint8 to fp16/32
#         img /= 255.0  # 0 - 255 to 0.0 - 1.0
#         if img.ndimension() == 3:
#             img = img.unsqueeze(0)

#         # Warmup
#         if device.type != 'cpu' and (old_img_b != img.shape[0] or old_img_h != img.shape[2] or old_img_w != img.shape[3]):
#             old_img_b = img.shape[0]
#             old_img_h = img.shape[2]
#             old_img_w = img.shape[3]
#             for i in range(3):
#                 model(img, augment=opt.augment)[0]

#         # Inference
#         t1 = time_synchronized()
#         with torch.no_grad():   # Calculating gradients would cause a GPU memory leak
#             pred = model(img, augment=opt.augment)[0]
#         t2 = time_synchronized()

#         # Apply NMS
#         pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
#         t3 = time_synchronized()

#         # Apply Classifier
#         if classify:
#             pred = apply_classifier(pred, modelc, img, im0s)

#         # Process detections
#         for i, det in enumerate(pred):  # detections per image
#             if webcam:  # batch_size >= 1
#                 p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count
#             else:
#                 p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)

#             p = Path(p)  # to Path
#             save_path = str(save_dir / p.name)  # img.jpg
#             txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # img.txt
#             gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
#             if len(det):
#                 # Rescale boxes from img_size to im0 size
#                 det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
#                 # Key detections by image name; move the tensor to the CPU
#                 # first, since .numpy() fails on CUDA tensors
#                 bbox[Path(txt_path).name] = det[:, :4].cpu().numpy()

#                 # Print results
#                 for c in det[:, -1].unique():
#                     n = (det[:, -1] == c).sum()  # detections per class
#                     s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string

#                 # Write results
#                 for *xyxy, conf, cls in reversed(det):
#                     if save_txt:  # Write to file
#                         xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
#                         line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh)  # label format
#                         with open(txt_path + '.txt', 'a') as f:
#                             f.write(('%g ' * len(line)).rstrip() % line + '\n')

#                     if save_img or view_img:  # Add bbox to image
#                         label = f'{names[int(cls)]} {conf:.2f}'
#                         plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=1)

#             # Print time (inference + NMS)
#             print(f'{s}Done. ({(1E3 * (t2 - t1)):.1f}ms) Inference, ({(1E3 * (t3 - t2)):.1f}ms) NMS')

#             # Stream results
#             # if view_img:
#             #     cv2.imshow(str(p), im0)
#             #     cv2.waitKey(1)  # 1 millisecond

#             # Save results (image with detections)
#             if save_img:
#                 if dataset.mode == 'image':
#                     # Image.fromarray(im0).show()
#                     cv2.imwrite(save_path, im0) 
#                     print(f" The image with the result is saved in: {save_path}")
#                 # else:  # 'video' or 'stream'
#                 #     if vid_path != save_path:  # new video
#                 #         vid_path = save_path
#                 #         if isinstance(vid_writer, cv2.VideoWriter):
#                 #             vid_writer.release()  # release previous video writer
#                 #         if vid_cap:  # video
#                 #             fps = vid_cap.get(cv2.CAP_PROP_FPS)
#                 #             w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
#                 #             h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
#                 #         else:  # stream
#                 #             fps, w, h = 30, im0.shape[1], im0.shape[0]
#                 #             save_path += '.mp4'
#                 #         vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
#                 #     vid_writer.write(im0)

#     if save_txt or save_img:
#         s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
#         #print(f"Results saved to {save_dir}{s}")

#     print(f'Done. ({time.time() - t0:.3f}s)')
#     return bbox, save_path
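
# `options` below stands in for the argparse.Namespace that yolov7/detect.py
# normally builds from the CLI, so detect() can be called programmatically.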

# class options:
#     def __init__(self, weights, source, img_size=640, conf_thres=0.1, iou_thres=0.45, device='', 
#                  view_img=False, save_txt=False, save_conf=False, nosave=False, classes=None, 
#                  agnostic_nms=False, augment=False, update=False, project='runs/detect', name='exp', 
#                  exist_ok=False, no_trace=False):
#         self.weights=weights
#         self.source=source
#         self.img_size=img_size
#         self.conf_thres=conf_thres
#         self.iou_thres=iou_thres
#         self.device=device
#         self.view_img=view_img
#         self.save_txt=save_txt
#         self.save_conf=save_conf
#         self.nosave=nosave
#         self.classes=classes
#         self.agnostic_nms=agnostic_nms
#         self.augment=augment
#         self.update=update
#         self.project=project
#         self.name=name
#         self.exist_ok=exist_ok
#         self.no_trace=no_trace

def get_output(input_image):
    # Inference is disabled for now: the app echoes the input image back.
    # Uncommenting the block below (and the detect/options code above)
    # restores detection. Note the input arrives as a numpy array, so it
    # must go through Image.fromarray before saving.
    # Image.fromarray(input_image).save(f"{BASE_DIR}/input/image.jpg")
    # source = f"{BASE_DIR}/input"
    # opt = options(weights='logo_detection.pt', source=source)
    # with torch.no_grad():
    #     bbox, output_path = detect(opt)
    # if os.path.exists(output_path):
    #     return np.array(Image.open(output_path))
    # else:
    return input_image
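
# Hypothetical helper (not part of the original app): detect() returns a
# dict mapping image name -> Nx4 array of xyxy boxes, and since cv2 is
# commented out above, PIL can draw them instead. A minimal sketch:
def draw_boxes(img: Image.Image, boxes: np.ndarray) -> Image.Image:
    from PIL import ImageDraw  # local import; only needed if this helper is used
    out = img.copy()
    draw = ImageDraw.Draw(out)
    for x1, y1, x2, y2 in boxes:
        # xyxy corners come straight from the detection tensor
        draw.rectangle([float(x1), float(y1), float(x2), float(y2)],
                       outline=(255, 0, 0), width=2)
    return out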


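# Gradio's "image" components pass pictures as numpy arrays by default, so
# get_output needs no conversion; debug=True keeps the process attached and
# surfaces errors in the console.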
demo = gr.Interface(fn=get_output, inputs="image", outputs="image")
demo.launch(debug=True)