rosebe committed on
Commit
39f55d8
·
1 Parent(s): ce27bb4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +145 -5
app.py CHANGED
@@ -1,3 +1,4 @@
 
1
  # import gradio as gr
2
  # import torch
3
 
@@ -22,13 +23,152 @@
22
 
23
  import gradio as gr
24
  import torch
 
 
 
 
 
 
 
 
 
 
 
 
 
25
 
26
- model = torch.hub.load('ultralytics/yolov5', 'custom', path='best.pt')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
 
28
# Define the face detector function
def detect_image(image):
    """Run the YOLOv5 model on a single image and return the annotated copy.

    The model draws its detections onto the frame via ``render()``; the
    first (and only) rendered frame is handed back to the caller.
    """
    predictions = model(image)
    rendered_frames = predictions.render()
    return rendered_frames[0]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
32
 
33
  # Create Gradio interfaces for different modes
34
  img_interface = gr.Interface(
 
1
+ # old code
2
  # import gradio as gr
3
  # import torch
4
 
 
23
 
24
import os

import cv2
import gradio as gr
import torch
27
+ # from IPython.display import clear_output
28
+ # import os, urllib.request
29
+ # import subprocess
30
+ # from roboflow import Roboflow
31
+ # import json
32
+ # from time import sleep
33
+ # from PIL import Image, ImageDraw
34
+ # import io
35
+ # import base64
36
+ # import requests
37
+ # from os.path import exists
38
+ # import sys, re, glob
39
 
40
+ # model = torch.hub.load('ultralytics/yolov5', 'custom', path='best.pt')
41
+ # rf = Roboflow(api_key="affmrRA3zyr34kAQF3sJ")
42
+ # project = rf.workspace().project("ecosmart-pxc0t")
43
+ # dataset = project.version(4).model
44
+
45
+ # # Define the face detector function
46
+ # def detect_image(image):
47
+ # results = model(image)
48
+
49
+ # return results.render()[0]
50
+
51
+ # def detect_video(video):
52
+ # HOME = os.path.expanduser("~")
53
+ # pathDoneCMD = f'{HOME}/doneCMD.sh'
54
+ # if not os.path.exists(f"{HOME}/.ipython/ttmg.py"):
55
+ # hCode = "https://raw.githubusercontent.com/yunooooo/gcct/master/res/ttmg.py"
56
+ # urllib.request.urlretrieve(hCode, f"{HOME}/.ipython/ttmg.py")
57
+
58
+ # from ttmg import (
59
+ # loadingAn,
60
+ # textAn,
61
+ # )
62
+
63
+ # os.chdir("/content/")
64
+ # os.makedirs("videos_to_infer", exist_ok=True)
65
+ # os.makedirs("inferred_videos", exist_ok=True)
66
+ # os.chdir("videos_to_infer")
67
+ # os.environ['inputFile'] = video.name
68
+ # command = ['ffmpeg', '-hide_banner', '-loglevel', 'error', '-i', input_file, '-vf', 'fps=2', output_pattern]
69
+ # subprocess.run(command)
70
+
71
+ # subprocess.run(['pip', 'install', 'roboflow'])
72
+ # install_roboflow()
73
+ # model = version.model
74
+ # print(model)
75
+
76
+ # file_path = "/content/videos_to_infer/"
77
+ # extention = ".png"
78
+ # globbed_files = sorted(glob.glob(file_path + '*' + extention))
79
+ # print(globbed_files)
80
+ # for image in globbed_files:
81
+ # # INFERENCE
82
+ # predictions = model.predict(image).json()['predictions']
83
+ # newly_rendered_image = Image.open(image)
84
+
85
+ # # RENDER
86
+ # # for each detection, create a crop and convert into CLIP encoding
87
+ # print(predictions)
88
+ # for prediction in predictions:
89
+ # # rip bounding box coordinates from current detection
90
+ # # note: infer returns center points of box as (x,y) and width, height
91
+ # # ----- but pillow crop requires the top left and bottom right points to crop
92
+ # x0 = prediction['x'] - prediction['width'] / 2
93
+ # x1 = prediction['x'] + prediction['width'] / 2
94
+ # y0 = prediction['y'] - prediction['height'] / 2
95
+ # y1 = prediction['y'] + prediction['height'] / 2
96
+ # box = (x0, y0, x1, y1)
97
+
98
+ # newly_rendered_image = draw_boxes(box, x0, y0, newly_rendered_image, prediction['class'])
99
+
100
+ # # WRITE
101
+ # save_with_bbox_renders(newly_rendered_image)
102
+
103
+ # # Run ffmpeg command
104
+ # subprocess.run(['ffmpeg', '-r', '8', '-s', '1920x1080', '-i', '/content/inferred_videos/YOUR_VIDEO_FILE_out%04d.png', '-vcodec', 'libx264', '-crf', '25', '-pix_fmt', 'yuv420p', 'test.mp4'])
105
+ # # Call the function to execute the commands
106
+ # execute_commands()
107
+
108
+
109
+
110
+ # def draw_boxes(box, x0, y0, img, class_name):
111
+ # bbox = ImageDraw.Draw(img)
112
+
113
+ # bbox.rectangle(box, outline =color_map[class_name], width=5)
114
+ # bbox.text((x0, y0), class_name, fill='black', anchor='mm')
115
+
116
+ # return img
117
+
118
+ # def save_with_bbox_renders(img):
119
+ # file_name = os.path.basename(img.filename)
120
+ # img.save('/content/inferred_videos/' + file_name)
121
+
122
+
123
+
124
+ # loadingAn(name="lds")
125
+ # textAn("Installing Dependencies...", ty='twg')
126
+ # os.system('pip install git+git://github.com/AWConant/jikanpy.git')
127
+ # os.system('add-apt-repository -y ppa:jonathonf/ffmpeg-4')
128
+ # os.system('apt-get update')
129
+ # os.system('apt install mediainfo')
130
+ # os.system('apt-get install ffmpeg')
131
+ # clear_output()
132
+ # print('Installation finished.')
133
+
134
+
135
def detect_video(video):
    """Run the detection model over every frame of a video and rebuild it.

    Parameters
    ----------
    video : str
        Path to the input video file (Gradio hands the function a filepath).

    Returns
    -------
    str or None
        Path to the rendered output video (``output_video.mp4``), or
        ``None`` when the input yielded no frames.
    """
    # NOTE(review): `model` is not loaded anywhere in this revision — the
    # torch.hub.load call above is commented out; restore it before use.
    capture = cv2.VideoCapture(video)  # bug fix: was the undefined name `video_path`

    # Preserve the source frame rate instead of the hard-coded 30;
    # fall back to 30 fps when the backend cannot report it (returns 0).
    frame_rate = capture.get(cv2.CAP_PROP_FPS) or 30

    # --- Extract, annotate and save each frame -------------------------
    frame_dir = 'video_frames'
    os.makedirs(frame_dir, exist_ok=True)
    frame_count = 0
    while True:
        success, frame = capture.read()
        if not success:
            break
        # bug fix: the original called model.predict() without passing the
        # frame and overwrote `frame` with the bare return value.
        results = model(frame)
        annotated = results.render()[0]
        # Zero-padded names keep lexicographic order == frame order.
        frame_output_path = os.path.join(frame_dir, f'frame_{frame_count:06d}.jpg')
        cv2.imwrite(frame_output_path, annotated)
        frame_count += 1
    capture.release()

    if frame_count == 0:
        return None  # empty or unreadable input: nothing to assemble

    # --- Reassemble the annotated frames into a video ------------------
    # bug fix: the original read frames back from the placeholder
    # 'path_to_image_directory' instead of the directory just written.
    image_files = sorted(os.listdir(frame_dir))

    first_frame = cv2.imread(os.path.join(frame_dir, image_files[0]))
    height, width, _ = first_frame.shape

    video_output_path = 'output_video.mp4'
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    video_writer = cv2.VideoWriter(video_output_path, fourcc, frame_rate, (width, height))
    for image_file in image_files:
        video_writer.write(cv2.imread(os.path.join(frame_dir, image_file)))
    video_writer.release()

    return video_output_path
172
 
173
  # Create Gradio interfaces for different modes
174
  img_interface = gr.Interface(