Update web-demos/hugging_face/app.py
web-demos/hugging_face/app.py  CHANGED  (+45 -37)
@@ -65,70 +65,78 @@ def get_prompt(click_state, click_input):
 # extract frames from upload video

 def get_frames_from_video(video_input, video_state):
+    """
+    Args:
+        video_path:str
+        timestamp:float64
+    Return
+        19 elements for the Gradio interface
+    """
     video_path = video_input
     frames = []
     user_name = time.time()
     status_ok = True
     operation_log = [("[Must Do]", "Click image"), (": Video uploaded! Try to click the image shown in step2 to add masks.\n", None)]
     try:
-        #
+        # Read the video with ffmpeg
         probe = ffmpeg.probe(video_path)
-
-
-
-
-
-        with tempfile.TemporaryDirectory() as tmpdir:
-            frame_pattern = os.path.join(tmpdir, 'frame_%05d.png')
-            (
-                ffmpeg
-                .input(video_path)
-                .output(frame_pattern, start_number=0, vsync=0, qscale=0)
-                .run(quiet=True)
-            )
-            extracted = sorted(os.listdir(tmpdir))
-            for file in extracted:
-                img = Image.open(os.path.join(tmpdir, file)).convert("RGB")
-                frames.append(np.array(img))
+        video_info_ff = next(stream for stream in probe['streams'] if stream['codec_type'] == 'video')
+        fps = eval(video_info_ff['r_frame_rate'])  # e.g. '25/1'
+        width = int(video_info_ff['width'])
+        height = int(video_info_ff['height'])
+        total_frames = int(video_info_ff['nb_frames'])

-
-
-        if len(frames) >= 500:
+        if total_frames >= 500:
             operation_log = [("You uploaded a video with more than 500 frames. Stop the video extraction. Kindly lower the video frame rate to a value below 500. We highly recommend deploying the demo locally for long video processing.", "Error")]
             status_ok = False
+            return [None] * 19  # Return placeholder values
+        else:
+            out, _ = (
+                ffmpeg
+                .input(video_path)
+                .output('pipe:', format='rawvideo', pix_fmt='rgb24')
+                .run(capture_stdout=True, capture_stderr=True)
+            )
+            video = np.frombuffer(out, np.uint8).reshape([-1, height, width, 3])
+            frames = [Image.fromarray(f) for f in video]

+        print(f'Inp video shape: t_{len(frames)}, s_{width}x{height}')
     except Exception as e:
         status_ok = False
         print("read_frame_source:{} error. {}\n".format(video_path, str(e)))
+        return [None] * 19

-    if frames
-        operation_log = [(f"Video uploaded! Try to click the image shown in step2 to add masks. (You uploaded a video with a size of {
+    if frames[0].size[1] > 720 or frames[0].size[0] > 720:
+        operation_log = [(f"Video uploaded! Try to click the image shown in step2 to add masks. (You uploaded a video with a size of {width}x{height}, and the length of its longest edge exceeds 720 pixels. We may resize the input video during processing.)", "Normal")]

     video_state = {
         "user_name": user_name,
         "video_name": os.path.split(video_path)[-1],
-        "origin_images": frames,
-        "painted_images":
-        "masks": [np.zeros((
-        "logits": [None]*len(frames),
+        "origin_images": [np.array(f) for f in frames],
+        "painted_images": [np.array(f) for f in frames],
+        "masks": [np.zeros((height, width), np.uint8)] * len(frames),
+        "logits": [None] * len(frames),
         "select_frame_number": 0,
         "fps": fps
     }

     video_info = "Video Name: {},\nFPS: {},\nTotal Frames: {},\nImage Size:{}".format(
-        video_state["video_name"], round(video_state["fps"], 0), len(frames), (
+        video_state["video_name"], round(video_state["fps"], 0), len(frames), (width, height)
     )
-    model.samcontroler.sam_controler.reset_image()
+    model.samcontroler.sam_controler.reset_image()
     model.samcontroler.sam_controler.set_image(video_state["origin_images"][0])

-    return video_state, video_info,
-
-
-
-
-
-
-
+    return video_state, video_info, frames[0], \
+        gr.update(visible=status_ok, maximum=len(frames), value=1), \
+        gr.update(visible=status_ok, maximum=len(frames), value=len(frames)), \
+        gr.update(visible=status_ok), gr.update(visible=status_ok), \
+        gr.update(visible=status_ok), gr.update(visible=status_ok), \
+        gr.update(visible=status_ok), gr.update(visible=status_ok), \
+        gr.update(visible=status_ok), gr.update(visible=status_ok), \
+        gr.update(visible=status_ok), gr.update(visible=status_ok), \
+        gr.update(visible=status_ok), gr.update(visible=status_ok, choices=[], value=[]), \
+        gr.update(visible=True, value=operation_log), \
+        gr.update(visible=status_ok, value=operation_log)

 # get the select frame from gradio slider
 def select_template(image_selection_slider, video_state, interactive_state, mask_dropdown):
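
For reference, a minimal standalone sketch of the probe-then-pipe decoding pattern the new branch relies on (ffmpeg-python + NumPy + Pillow). The decode_frames helper, the "sample.mp4" path, and the max_frames argument are illustrative assumptions, not part of the demo code, and the frame-rate fraction is parsed by splitting the 'num/den' string instead of eval, purely as a choice made in this sketch.

# Sketch: probe a video's metadata, then decode every frame over a rawvideo pipe.
# Assumes ffmpeg-python, numpy, and Pillow are installed; "sample.mp4" is a placeholder path.
import ffmpeg
import numpy as np
from PIL import Image


def decode_frames(video_path, max_frames=500):
    # Query stream metadata (size, frame rate, frame count) without decoding.
    probe = ffmpeg.probe(video_path)
    info = next(s for s in probe['streams'] if s['codec_type'] == 'video')
    width, height = int(info['width']), int(info['height'])
    num, den = info['r_frame_rate'].split('/')  # e.g. '25/1'
    fps = float(num) / float(den)
    if int(info.get('nb_frames', 0)) >= max_frames:
        raise ValueError(f"video reports {info['nb_frames']} frames, cap is {max_frames}")
    # Decode the whole clip to raw RGB24 bytes on stdout, then view it as (T, H, W, 3).
    out, _ = (
        ffmpeg
        .input(video_path)
        .output('pipe:', format='rawvideo', pix_fmt='rgb24')
        .run(capture_stdout=True, capture_stderr=True)
    )
    video = np.frombuffer(out, np.uint8).reshape([-1, height, width, 3])
    return [Image.fromarray(f) for f in video], fps


if __name__ == "__main__":
    frames, fps = decode_frames("sample.mp4")
    print(f"decoded {len(frames)} frames at {fps:.2f} fps, size {frames[0].size}")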