AlshimaaGamalAlsaied committed on
Commit 8e4ec6f · 1 Parent(s): f4e1b73
Files changed (1)
  1. app.py +91 -34
app.py CHANGED
@@ -1,21 +1,12 @@
  import gradio as gr
  import torch
  import yolov5
- import subprocess
- import tempfile
- import time
- from pathlib import Path
- import uuid
- import cv2
- import gradio as gr
-
-

  # Images
- #torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg', 'zidane.jpg')
- #torch.hub.download_url_to_file('https://raw.githubusercontent.com/obss/sahi/main/tests/data/small-vehicles1.jpeg', 'small-vehicles1.jpeg')
-
- def image_fn(
+ torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg', 'zidane.jpg')
+ torch.hub.download_url_to_file('https://raw.githubusercontent.com/WongKinYiu/yolov7/main/inference/images/image3.jpg', 'image3.jpg')
+
+ def yolov5_inference(
      image: gr.inputs.Image = None,
      model_path: gr.inputs.Dropdown = None,
      image_size: gr.inputs.Slider = 640,
@@ -33,36 +24,102 @@ def image_fn(
      Returns:
          Rendered image
      """
-
-     model = yolov5.load(model_path, device="cpu", hf_model=True, trace=False)
+     model = yolov5.load(model_path, device="cpu")
      model.conf = conf_threshold
      model.iou = iou_threshold
      results = model([image], size=image_size)
      return results.render()[0]
-
+
+
+ inputs = [
+     gr.inputs.Image(type="pil", label="Input Image"),
+     gr.inputs.Dropdown(["yolov5s.pt", "alshimaa/yolo5_epoch100"], label="Model"),
+     gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size"),
+     gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
+     gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold"),
+ ]

+ outputs = gr.outputs.Image(type="filepath", label="Output Image")
+ title = "YOLOv5"
+ description = "YOLOv5 is a family of object detection models pretrained on COCO dataset. This model is a pip implementation of the original YOLOv5 model."

+ examples = [['zidane.jpg', 'yolov5s.pt', 640, 0.25, 0.45], ['image3.jpg', 'yolov5s.pt', 640, 0.25, 0.45]]
  demo_app = gr.Interface(
-     fn=image_fn,
-     inputs=[
-         gr.inputs.Image(type="pil", label="Input Image"),
-         gr.inputs.Dropdown(
-             choices=[
-                 "alshimaa/yolo5_epoch100",
-                 #"kadirnar/yolov7-v0.1",
-             ],
-             default="alshimaa/yolo5_epoch100",
-             label="Model",
-         )
-         #gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size")
-         #gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
-         #gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold")
-     ],
-     outputs=gr.outputs.Image(type="filepath", label="Output Image"),
-     title="Object Detector: Identify People Without Mask",
-     examples=[['img1.png', 'alshimaa/yolo5_epoch100', 640, 0.25, 0.45], ['img2.png', 'alshimaa/yolo5_epoch100', 640, 0.25, 0.45], ['img3.png', 'alshimaa/yolo5_epoch100', 640, 0.25, 0.45]],
+     fn=yolov5_inference,
+     inputs=inputs,
+     outputs=outputs,
+     title=title,
+     examples=examples,
      cache_examples=True,
      live=True,
      theme='huggingface',
  )
  demo_app.launch(debug=True, enable_queue=True)
+ # import gradio as gr
+ # import torch
+ # import yolov5
+ # import subprocess
+ # import tempfile
+ # import time
+ # from pathlib import Path
+ # import uuid
+ # import cv2
+ # import gradio as gr
+
+
+
+ # # Images
+ # #torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg', 'zidane.jpg')
+ # #torch.hub.download_url_to_file('https://raw.githubusercontent.com/obss/sahi/main/tests/data/small-vehicles1.jpeg', 'small-vehicles1.jpeg')
+
+ # def image_fn(
+ #     image: gr.inputs.Image = None,
+ #     model_path: gr.inputs.Dropdown = None,
+ #     image_size: gr.inputs.Slider = 640,
+ #     conf_threshold: gr.inputs.Slider = 0.25,
+ #     iou_threshold: gr.inputs.Slider = 0.45,
+ # ):
+ #     """
+ #     YOLOv5 inference function
+ #     Args:
+ #         image: Input image
+ #         model_path: Path to the model
+ #         image_size: Image size
+ #         conf_threshold: Confidence threshold
+ #         iou_threshold: IOU threshold
+ #     Returns:
+ #         Rendered image
+ #     """
+
+ #     model = yolov5.load(model_path, device="cpu", hf_model=True, trace=False)
+ #     model.conf = conf_threshold
+ #     model.iou = iou_threshold
+ #     results = model([image], size=image_size)
+ #     return results.render()[0]
+
+
+
+ # demo_app = gr.Interface(
+ #     fn=image_fn,
+ #     inputs=[
+ #         gr.inputs.Image(type="pil", label="Input Image"),
+ #         gr.inputs.Dropdown(
+ #             choices=[
+ #                 "alshimaa/yolo5_epoch100",
+ #                 #"kadirnar/yolov7-v0.1",
+ #             ],
+ #             default="alshimaa/yolo5_epoch100",
+ #             label="Model",
+ #         )
+ #         #gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size")
+ #         #gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
+ #         #gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold")
+ #     ],
+ #     outputs=gr.outputs.Image(type="filepath", label="Output Image"),
+ #     title="Object Detector: Identify People Without Mask",
+ #     examples=[['img1.png', 'alshimaa/yolo5_epoch100', 640, 0.25, 0.45], ['img2.png', 'alshimaa/yolo5_epoch100', 640, 0.25, 0.45], ['img3.png', 'alshimaa/yolo5_epoch100', 640, 0.25, 0.45]],
+ #     cache_examples=True,
+ #     live=True,
+ #     theme='huggingface',
+ # )
+ # demo_app.launch(debug=True, enable_queue=True)
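
For reference, a minimal standalone sketch of the inference path that the new yolov5_inference function wraps, assuming the yolov5 pip package is installed and the zidane.jpg test image downloaded by the script above is present; yolov5s.pt is one of the dropdown choices in the commit, while the output filename is purely illustrative:

import yolov5
from PIL import Image

# Load the detector on CPU, mirroring yolov5.load(model_path, device="cpu") in app.py
model = yolov5.load("yolov5s.pt", device="cpu")
model.conf = 0.25  # confidence threshold, same default as the slider
model.iou = 0.45   # IoU threshold for NMS, same default as the slider

# Run inference on a single PIL image at the app's default size
image = Image.open("zidane.jpg")
results = model([image], size=640)

# render() draws the predicted boxes and returns annotated numpy arrays
annotated = results.render()[0]
Image.fromarray(annotated).save("zidane_out.jpg")  # illustrative output path

The Gradio interface drives these same calls, except that the image size and both thresholds come from the sliders and the model path from the dropdown rather than hard-coded defaults.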