import gradio as gr
import subprocess
import shutil
import os
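
# True only when running in the original shared Space (detected via the
# SPACE_ID env var); used below to lock the sliders to their defaults.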
is_shared_ui = "fffiloni/Go-With-The-Flow" in os.environ.get("SPACE_ID", "")
from huggingface_hub import snapshot_download
# Define the folder name
folder_name = "lora_models"
# Create the folder
os.makedirs(folder_name, exist_ok=True)
# Download models
snapshot_download(
    repo_id="Eyeline-Research/Go-with-the-Flow",
    local_dir=folder_name
)

def process_video(video_path, prompt, num_steps, degradation_level):
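    """Warp noise from the input video, then run cut-and-drag inference.

    Returns the path to the generated MP4; raises gr.Error if either
    subprocess step fails.
    """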
    output_folder = "noise_warp_output_folder"
    if os.path.exists(output_folder):
        # Delete the folder and its contents
        shutil.rmtree(output_folder)
    output_video = "output.mp4"
    device = "cuda"
    try:
        # Step 1: Warp the noise
        warp_command = [
            "python", "make_warped_noise.py", video_path,
            "--output_folder", output_folder
        ]
        subprocess.run(warp_command, check=True)
        warped_vid_path = os.path.join(output_folder, "input.mp4")
        # Step 2: Run inference
        inference_command = [
            "python", "cut_and_drag_inference.py", output_folder,
            "--prompt", prompt,
            "--degradation", str(degradation_level),
            "--output_mp4_path", output_video,
            "--device", device,
            "--num_inference_steps", str(num_steps)
        ]
        subprocess.run(inference_command, check=True)
        # Return the path to the output video
        return output_video
    except subprocess.CalledProcessError as e:
        raise gr.Error(f"An error occurred: {str(e)}")
css="""
div#follow-div{
text-decoration: none !important;
display: flex;
column-gap: 5px;
font-size: 0.8em;
}
"""

with gr.Blocks(css=css) as demo:
    with gr.Column():
        gr.Markdown("# Go-With-The-Flow • Cut and Drag")
        gr.HTML("""
        <div style="display:flex;column-gap:4px;">
            <a href="https://github.com/Eyeline-Research/Go-with-the-Flow">
                <img src='https://img.shields.io/badge/GitHub-Repo-blue'>
            </a>
            <a href="https://arxiv.org/abs/2501.08331">
                <img src='https://img.shields.io/badge/ArXiv-Paper-red'>
            </a>
            <a href="https://eyeline-research.github.io/Go-with-the-Flow/">
                <img src='https://img.shields.io/badge/Project-Page-green'>
            </a>
            <a href="https://huggingface.co/spaces/fffiloni/Go-With-The-Flow?duplicate=true">
                <img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/duplicate-this-space-sm.svg" alt="Duplicate this Space">
            </a>
        </div>
        """)
        with gr.Row():
            with gr.Column():
                input_video = gr.Video(label="Input Video")
                prompt = gr.Textbox(label="Prompt")
                with gr.Row():
                    # Sliders are locked (non-interactive) on the shared Space
                    num_steps = gr.Slider(label="Inference Steps", minimum=1, maximum=30, value=5, step=1, interactive=not is_shared_ui)
                    degradation = gr.Slider(label="Noise Degradation", minimum=0, maximum=1, value=0, step=0.1, interactive=not is_shared_ui)
                submit_btn = gr.Button("Submit")
                gr.Examples(
                    examples=[
                        ["./examples/example_1.mp4", "yellow plastic duck is swimming and jumping in the water"],
                        ["./examples/example_2.mp4", "the car starts and goes forward to the end of the street"]
                    ],
                    inputs=[input_video, prompt]
                )
            with gr.Column():
                output_video = gr.Video(label="Result")
                gr.HTML("""
                <div id="follow-div">
                    <a href="https://huggingface.co/fffiloni">
                        <img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/follow-me-on-HF-sm-dark.svg" alt="Follow me on HF">
                    </a>
                    <p>for space updates</p>
                </div>
                """)

    submit_btn.click(
        fn=process_video,
        inputs=[input_video, prompt, num_steps, degradation],
        outputs=[output_video]
    )

demo.queue().launch(show_api=False)
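
# Note: the two scripts invoked above (make_warped_noise.py and
# cut_and_drag_inference.py) are assumed to sit alongside this file, as in the
# Go-with-the-Flow repo, and a CUDA GPU is assumed since device is hard-coded
# to "cuda".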