nathanjc committed on
Commit c97a3ea · verified · 1 Parent(s): 7d6bf72
Files changed (1):
  app.py  +55 -45
app.py CHANGED
@@ -13,10 +13,22 @@ import torch.backends.cudnn as cudnn
 from numpy import random
 import numpy as np
 
+# Add safe globals for PyTorch 2.6+ loading
+import numpy
+torch.serialization.add_safe_globals([
+    numpy.core.multiarray._reconstruct,
+    numpy.core.multiarray.scalar,
+    numpy.core.numeric.True_,
+    numpy.core.numeric._frombuffer,
+    numpy.dtype,
+    numpy._globals._NoValue,
+    numpy.core.numeric.asarray,
+    numpy.ndarray
+])
+
 BASE_DIR = "/home/user/app"
 os.chdir(BASE_DIR)
-os.makedirs(f"{BASE_DIR}/input",exist_ok=True)
-# os.system(f"git clone https://github.com/WongKinYiu/yolov7.git {BASE_DIR}/yolov7")
+os.makedirs(f"{BASE_DIR}/input", exist_ok=True)
 sys.path.append(f'{BASE_DIR}/yolov7')
 os.system("pip install yolov7-package==0.0.12")
 
@@ -35,13 +47,6 @@ def plot_one_box(x, img, color=None, label=None, line_thickness=3):
 
 
 def detect(opt, save_img=False):
-    # from models.experimental import attempt_load
-    # from utils.datasets import LoadStreams, LoadImages
-    # from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \
-    #     scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path
-    # from utils.plots import plot_one_box
-    # from utils.torch_utils import select_device, load_classifier, time_synchronized, TracedModel
-
     from yolov7_package import Yolov7Detector
     from yolov7_package.models.experimental import attempt_load
     from yolov7_package.utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \
@@ -49,6 +54,15 @@ def detect(opt, save_img=False):
     from yolov7_package.utils.torch_utils import select_device, load_classifier, time_synchronized, TracedModel
     from yolov7_package.utils.datasets import LoadStreams, LoadImages
 
+    # Monkey patch the attempt_load function to use weights_only=False
+    original_torch_load = torch.load
+    def patched_torch_load(w, map_location=None):
+        return original_torch_load(w, map_location=map_location, weights_only=False)
+
+    # Apply the monkey patch
+    import yolov7_package.models.experimental
+    yolov7_package.models.experimental.torch.load = patched_torch_load
+
     bbox = {}
     source, weights, view_img, save_txt, imgsz, trace = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size, not opt.no_trace
     save_img = not opt.nosave and not source.endswith('.txt') # save inference images
@@ -65,8 +79,16 @@ def detect(opt, save_img=False):
     half = device.type != 'cpu' # half precision only supported on CUDA
 
     # Load model
-    det = Yolov7Detector(weights=weights, traced=False)
-    model = attempt_load(weights, map_location=device) # load FP32 model
+    try:
+        det = Yolov7Detector(weights=weights, traced=False)
+        model = attempt_load(weights, map_location=device) # load FP32 model
+    except Exception as e:
+        print(f"Error loading model: {e}")
+        # Fallback: try loading with weights_only=False directly
+        print("Trying fallback method to load model...")
+        model = torch.load(weights, map_location=device, weights_only=False)
+        det = None
+
     stride = int(model.stride.max()) # model stride
     imgsz = check_img_size(imgsz, s=stride) # check img_size
 
@@ -145,7 +167,6 @@ def detect(opt, save_img=False):
             if len(det):
                 # Rescale boxes from img_size to im0 size
                 det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
-                # print(f"BOXES ---->>>> {det[:, :4]}")
                 bbox[f"{txt_path.split('/')[4]}"]=(det[:, :4]).numpy()
 
             # Print results
@@ -168,38 +189,17 @@ def detect(opt, save_img=False):
             # Print time (inference + NMS)
             print(f'{s}Done. ({(1E3 * (t2 - t1)):.1f}ms) Inference, ({(1E3 * (t3 - t2)):.1f}ms) NMS')
 
-            # Stream results
-            # if view_img:
-            #     cv2.imshow(str(p), im0)
-            #     cv2.waitKey(1) # 1 millisecond
-
             # Save results (image with detections)
             if save_img:
                 if dataset.mode == 'image':
-                    # Image.fromarray(im0).show()
                     cv2.imwrite(save_path, im0)
                     print(f" The image with the result is saved in: {save_path}")
-                # else: # 'video' or 'stream'
-                #     if vid_path != save_path: # new video
-                #         vid_path = save_path
-                #         if isinstance(vid_writer, cv2.VideoWriter):
-                #             vid_writer.release() # release previous video writer
-                #         if vid_cap: # video
-                #             fps = vid_cap.get(cv2.CAP_PROP_FPS)
-                #             w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-                #             h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-                #         else: # stream
-                #             fps, w, h = 30, im0.shape[1], im0.shape[0]
-                #         save_path += '.mp4'
-                #         vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
-                #     vid_writer.write(im0)
 
     if save_txt or save_img:
         s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
-        #print(f"Results saved to {save_dir}{s}")
 
     print(f'Done. ({time.time() - t0:.3f}s)')
-    return bbox,save_path
+    return bbox, save_path
 
 class options:
     def __init__(self, weights, source, img_size=640, conf_thres=0.1, iou_thres=0.45, device='',
@@ -226,19 +226,29 @@ class options:
         self.no_trace=no_trace
 
 def get_output(input_image):
-    ### Numpy -> PIL
-    input_image = Image.fromarray(input_image).convert('RGB')
-    input_image.save(f"{BASE_DIR}/input/image.jpg")
-    source = f"{BASE_DIR}/input"
-    opt = options(weights='logo_detection.pt',source=source)
-    bbox = None
-    with torch.no_grad():
-        bbox,output_path = detect(opt)
-    if os.path.exists(output_path):
-        return Image.open(output_path)
-    else:
+    try:
+        # Numpy -> PIL
+        input_image = Image.fromarray(input_image).convert('RGB')
+        input_image.save(f"{BASE_DIR}/input/image.jpg")
+        source = f"{BASE_DIR}/input"
+        opt = options(weights='logo_detection.pt', source=source)
+        bbox = None
+        with torch.no_grad():
+            bbox, output_path = detect(opt)
+        if os.path.exists(output_path):
+            return Image.open(output_path)
+        else:
+            return input_image
+    except Exception as e:
+        print(f"Error in get_output: {e}")
+        # Return the original image if there's an error
         return input_image
 
 
+# Add debug prints to check environment
+print(f"PyTorch version: {torch.__version__}")
+print(f"Current working directory: {os.getcwd()}")
+print(f"Files in directory: {os.listdir('.')}")
+
 demo = gr.Interface(fn=get_output, inputs="image", outputs="image")
 demo.launch(debug=True)
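
Note on the change: PyTorch 2.6 switched the default of torch.load to weights_only=True, which rejects the NumPy objects pickled inside YOLOv7 checkpoints and makes attempt_load fail. The sketch below is a minimal, standalone illustration of the two workarounds this commit combines: allow-listing NumPy globals via torch.serialization.add_safe_globals, and falling back to weights_only=False for a trusted checkpoint. The load_checkpoint helper and the shortened allow-list are illustrative only (not part of app.py); it assumes PyTorch >= 2.5 (where add_safe_globals exists) and reuses the logo_detection.pt weights file this Space already loads.

# Minimal sketch (not part of the commit): load a pickled YOLOv7 checkpoint
# under PyTorch 2.6+, where torch.load() defaults to weights_only=True.
import numpy
import torch

def load_checkpoint(path, device="cpu"):
    # Hypothetical helper, not from app.py.
    try:
        # Keep the safe unpickler, but allow-list the NumPy globals that
        # YOLOv7 checkpoints typically reference (the commit registers a
        # longer list at import time).
        torch.serialization.add_safe_globals([
            numpy.ndarray,
            numpy.dtype,
            numpy.core.multiarray._reconstruct,
            numpy.core.multiarray.scalar,
        ])
        return torch.load(path, map_location=device)
    except Exception as exc:
        # Fallback mirroring the commit's try/except in detect(): only do
        # this for checkpoints you trust, since weights_only=False runs
        # arbitrary pickle code during loading.
        print(f"Safe load failed ({exc}); retrying with weights_only=False")
        return torch.load(path, map_location=device, weights_only=False)

if __name__ == "__main__":
    ckpt = load_checkpoint("logo_detection.pt")
    # YOLOv7 checkpoints are usually a dict with a 'model' entry.
    model = ckpt["model"] if isinstance(ckpt, dict) and "model" in ckpt else ckpt
    print(type(model))

One design note: assuming yolov7_package.models.experimental does a plain `import torch`, the commit's assignment `yolov7_package.models.experimental.torch.load = patched_torch_load` rebinds `load` on the shared torch module object, so the monkey patch affects torch.load process-wide, not only inside attempt_load.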