# -*- coding: utf-8 -*-
"""Gen AI Project1
Automatically generated by Colab.
Original file is located at
https://colab.research.google.com/drive/1Q27-bhi-hIw4U_QKiDXy3bwfLjPOI02o
"""
# AI Powered Video Editing
# YOLOv8 (Ultralytics YOLOv8n)
!pip install datasets
!pip install ultralytics
from datasets import load_dataset
from moviepy.editor import ImageSequenceClip
from ultralytics import YOLO
import os
import cv2
from PIL import Image
# Load dataset
dataset = load_dataset("VarunB31990/Video-Editing-Dataset")
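
# Quick schema check (optional sketch): the "original_image" column used below
# is an assumption about this dataset's layout, so print the available splits
# and features before relying on that key.
print(dataset)                      # available splits and row counts
print(dataset["train"].features)    # column names and types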
# Load YOLO model
model = YOLO("yolov8n.pt")
# Directory for images
image_dir = "images/"
os.makedirs(image_dir, exist_ok=True)
processed_dir = "processed_frames/"
os.makedirs(processed_dir, exist_ok=True)
# Process images and run YOLO
processed_paths = []
for i, item in enumerate(dataset["train"]):
    if "original_image" in item:
        # Save the dataset image to disk so YOLO can read it by path;
        # convert to RGB to guard against palette/alpha modes failing on JPEG save
        image = item["original_image"].convert("RGB")
        image_path = os.path.join(image_dir, f"frame_{i}.jpg")
        image.save(image_path)

        # Run YOLO on the image
        results = model(image_path)
        for result in results:
            im_array = result.plot()  # annotated detections as a BGR numpy array
            im = Image.fromarray(im_array[..., ::-1])  # convert BGR -> RGB for PIL
            detected_path = os.path.join(processed_dir, f"detected_{i}.jpg")
            im.save(detected_path)
            processed_paths.append(detected_path)
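
# Optional sketch: inspect the raw detections from the last processed frame.
# Each Ultralytics Results object exposes boxes (class ids, confidences,
# xyxy coordinates) and a names dict mapping class ids to labels.
if processed_paths:
    last_result = results[0]
    for box in last_result.boxes:
        label = last_result.names[int(box.cls)]
        print(f"{label}: confidence {float(box.conf):.2f}")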
# Create video from processed images
if len(processed_paths) > 1:
    clip = ImageSequenceClip(processed_paths, fps=10)
    clip.write_videofile("yolo_detection_video.mp4", codec="libx264", fps=10)
    print("🎥 Video created: yolo_detection_video.mp4")
else:
    print("⚠️ Not enough images to create a video.")
from IPython.display import display, Video

# Only embed the video if it was actually written above
if os.path.exists("yolo_detection_video.mp4"):
    display(Video("yolo_detection_video.mp4", embed=True))