# old code
# import gradio as gr
# import torch
# model = torch.hub.load('ultralytics/yolov5', 'custom', path='best.pt')
# Define the face detector function
# def detect_faces(image):
#     # Loading in yolov5s - you can switch to larger models such as yolov5m or yolov5l, or smaller such as yolov5n
#     results = model(image)
#     return results.render()[0]
# # Create a Gradio interface
# iface = gr.Interface(fn=detect_faces, inputs=gr.Image(source="webcam", tool=None), outputs="image")
# # Launch the interface
# iface.launch(debug=True)
# demo = gr.TabbedInterface([img_demo, vid_demo], ["Image", "Video"])
# if __name__ == "__main__":
#     demo.launch()
import gradio as gr
import torch
import cv2
# from IPython.display import clear_output
# import os, urllib.request
# import subprocess
# from roboflow import Roboflow
# import json
# from time import sleep
# from PIL import Image, ImageDraw
# import io
# import base64
# import requests
# from os.path import exists
# import sys, re, glob
# model = torch.hub.load('ultralytics/yolov5', 'custom', path='best.pt')
# rf = Roboflow(api_key="affmrRA3zyr34kAQF3sJ")
# project = rf.workspace().project("ecosmart-pxc0t")
# dataset = project.version(4).model
# def detect_video(video):
#     HOME = os.path.expanduser("~")
#     pathDoneCMD = f'{HOME}/doneCMD.sh'
#     if not os.path.exists(f"{HOME}/.ipython/ttmg.py"):
#         hCode = "https://raw.githubusercontent.com/yunooooo/gcct/master/res/ttmg.py"
#         urllib.request.urlretrieve(hCode, f"{HOME}/.ipython/ttmg.py")
#     from ttmg import (
#         loadingAn,
#         textAn,
#     )
#     os.chdir("/content/")
#     os.makedirs("videos_to_infer", exist_ok=True)
#     os.makedirs("inferred_videos", exist_ok=True)
#     os.chdir("videos_to_infer")
#     os.environ['inputFile'] = video.name
#     command = ['ffmpeg', '-hide_banner', '-loglevel', 'error', '-i', input_file, '-vf', 'fps=2', output_pattern]
#     subprocess.run(command)
#     subprocess.run(['pip', 'install', 'roboflow'])
#     install_roboflow()
#     model = version.model
#     print(model)
#     file_path = "/content/videos_to_infer/"
#     extention = ".png"
#     globbed_files = sorted(glob.glob(file_path + '*' + extention))
#     print(globbed_files)
#     for image in globbed_files:
#         # INFERENCE
#         predictions = model.predict(image).json()['predictions']
#         newly_rendered_image = Image.open(image)
#         # RENDER
#         # for each detection, create a crop and convert into CLIP encoding
#         print(predictions)
#         for prediction in predictions:
#             # rip bounding box coordinates from current detection
#             # note: infer returns center points of box as (x,y) and width, height
#             # ----- but pillow crop requires the top left and bottom right points to crop
#             x0 = prediction['x'] - prediction['width'] / 2
#             x1 = prediction['x'] + prediction['width'] / 2
#             y0 = prediction['y'] - prediction['height'] / 2
#             y1 = prediction['y'] + prediction['height'] / 2
#             box = (x0, y0, x1, y1)
#             newly_rendered_image = draw_boxes(box, x0, y0, newly_rendered_image, prediction['class'])
#         # WRITE
#         save_with_bbox_renders(newly_rendered_image)
#     # Run ffmpeg command
#     subprocess.run(['ffmpeg', '-r', '8', '-s', '1920x1080', '-i', '/content/inferred_videos/YOUR_VIDEO_FILE_out%04d.png', '-vcodec', 'libx264', '-crf', '25', '-pix_fmt', 'yuv420p', 'test.mp4'])
# # Call the function to execute the commands
# execute_commands()
# def draw_boxes(box, x0, y0, img, class_name):
#     bbox = ImageDraw.Draw(img)
#     bbox.rectangle(box, outline=color_map[class_name], width=5)
#     bbox.text((x0, y0), class_name, fill='black', anchor='mm')
#     return img
# def save_with_bbox_renders(img):
#     file_name = os.path.basename(img.filename)
#     img.save('/content/inferred_videos/' + file_name)
# loadingAn(name="lds")
# textAn("Installing Dependencies...", ty='twg')
# os.system('pip install git+git://github.com/AWConant/jikanpy.git')
# os.system('add-apt-repository -y ppa:jonathonf/ffmpeg-4')
# os.system('apt-get update')
# os.system('apt install mediainfo')
# os.system('apt-get install ffmpeg')
# clear_output()
# print('Installation finished.')
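# A quick worked example of the center-box -> corner-box conversion in the old
# code above (illustrative numbers only): a detection centered at x=100, y=50
# with width=40 and height=20 gives box = (x0, y0, x1, y1) = (80, 40, 120, 60),
# i.e. the top-left and bottom-right corners Pillow's rectangle()/crop() expect.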
# Load the custom-trained YOLOv5 weights (best.pt is expected next to this script)
model = torch.hub.load('ultralytics/yolov5', 'custom', path='best.pt')

# Run the model on a single image and return the annotated result
def detect_image(image):
    results = model(image)  # Gradio supplies the image as an RGB numpy array
    return results.render()[0]  # frame with bounding boxes drawn
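# Quick local sanity check for detect_image (hypothetical file name; assumes a
# test image sits next to this script):
#
#   import numpy as np
#   from PIL import Image
#   annotated = detect_image(np.array(Image.open('sample.jpg')))
#   Image.fromarray(annotated).save('sample_out.jpg')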
# Run the model frame-by-frame over an uploaded video and write an annotated copy
def detect_video(video_path):
    video = cv2.VideoCapture(video_path)
    frame_rate = video.get(cv2.CAP_PROP_FPS) or 30  # fall back to 30 fps if the container reports none
    width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    video_output_path = 'output_video.mp4'  # Replace with your desired output video path
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # You can change the codec as needed
    video_writer = cv2.VideoWriter(video_output_path, fourcc, frame_rate, (width, height))
    while True:
        success, frame = video.read()
        if not success:
            break
        # The YOLOv5 hub model expects RGB input, while OpenCV reads frames as BGR
        results = model(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        rendered = results.render()[0]  # annotated frame, still RGB
        video_writer.write(cv2.cvtColor(rendered, cv2.COLOR_RGB2BGR))
    video.release()
    video_writer.release()
    return video_output_path  # Gradio's video output expects a file path, not the writer object
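# Likewise for detect_video (hypothetical file name; assumes clip.mp4 sits next
# to this script):
#
#   out_path = detect_video('clip.mp4')
#   print('annotated video written to', out_path)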
# Create Gradio interfaces for the two modes
img_interface = gr.Interface(
    fn=detect_image,
    inputs=gr.Image(source="upload"),
    outputs="image",
    title="Image",
)
vid_interface = gr.Interface(
    fn=detect_video,
    inputs=gr.Video(source="upload"),
    outputs="video",
    title="Video",
)
# Create a list of interfaces
interfaces = [img_interface, vid_interface]
# Create the tabbed interface
tabbed_interface = gr.TabbedInterface(interfaces, ["Image", "Video"])
# Launch the tabbed interface
tabbed_interface.launch(debug=True)