goryhon committed on
Commit
3ce5158
·
verified ·
1 Parent(s): ee250bb

Update web-demos/hugging_face/app.py

Browse files
Files changed (1) hide show
  1. web-demos/hugging_face/app.py +42 -47
web-demos/hugging_face/app.py CHANGED
@@ -70,7 +70,7 @@ def get_frames_from_video(video_input, video_state):
70
  video_path:str
71
  timestamp:float64
72
  Return
73
- 19 элементов для Gradio-интерфейса
74
  """
75
  video_path = video_input
76
  frames = []
@@ -78,65 +78,60 @@ def get_frames_from_video(video_input, video_state):
78
  status_ok = True
79
  operation_log = [("[Must Do]", "Click image"), (": Video uploaded! Try to click the image shown in step2 to add masks.\n", None)]
80
  try:
81
- # Считываем видео с помощью ffmpeg
82
- probe = ffmpeg.probe(video_path)
83
- video_info_ff = next(stream for stream in probe['streams'] if stream['codec_type'] == 'video')
84
- fps = eval(video_info_ff['r_frame_rate']) # Например, '25/1'
85
- width = int(video_info_ff['width'])
86
- height = int(video_info_ff['height'])
87
- total_frames = int(video_info_ff['nb_frames'])
88
-
89
- if total_frames >= 500:
90
  operation_log = [("You uploaded a video with more than 500 frames. Stop the video extraction. Kindly lower the video frame rate to a value below 500. We highly recommend deploying the demo locally for long video processing.", "Error")]
 
 
 
 
91
  status_ok = False
92
- return [None] * 19 # Возвращаем заглушки
93
  else:
94
- out, _ = (
95
- ffmpeg
96
- .input(video_path)
97
- .output('pipe:', format='rawvideo', pix_fmt='rgb24')
98
- .run(capture_stdout=True, capture_stderr=True)
99
- )
100
- video = np.frombuffer(out, np.uint8).reshape([-1, height, width, 3])
101
- frames = [Image.fromarray(f) for f in video]
102
-
103
- print(f'Inp video shape: t_{len(frames)}, s_{width}x{height}')
104
- except Exception as e:
 
 
 
105
  status_ok = False
106
  print("read_frame_source:{} error. {}\n".format(video_path, str(e)))
107
- return [None] * 19
108
-
109
- if frames[0].size[1] > 720 or frames[0].size[0] > 720:
110
- operation_log = [(f"Video uploaded! Try to click the image shown in step2 to add masks. (You uploaded a video with a size of {width}x{height}, and the length of its longest edge exceeds 720 pixels. We may resize the input video during processing.)", "Normal")]
111
 
112
  video_state = {
113
  "user_name": user_name,
114
  "video_name": os.path.split(video_path)[-1],
115
- "origin_images": [np.array(f) for f in frames],
116
- "painted_images": [np.array(f) for f in frames],
117
- "masks": [np.zeros((height, width), np.uint8)] * len(frames),
118
- "logits": [None] * len(frames),
119
  "select_frame_number": 0,
120
  "fps": fps
121
- }
122
-
123
- video_info = "Video Name: {},\nFPS: {},\nTotal Frames: {},\nImage Size:{}".format(
124
- video_state["video_name"], round(video_state["fps"], 0), len(frames), (width, height)
125
- )
126
  model.samcontroler.sam_controler.reset_image()
127
  model.samcontroler.sam_controler.set_image(video_state["origin_images"][0])
128
-
129
- return video_state, video_info, frames[0], \
130
- gr.update(visible=status_ok, maximum=len(frames), value=1), \
131
- gr.update(visible=status_ok, maximum=len(frames), value=len(frames)), \
132
- gr.update(visible=status_ok), gr.update(visible=status_ok), \
133
- gr.update(visible=status_ok), gr.update(visible=status_ok), \
134
- gr.update(visible=status_ok), gr.update(visible=status_ok), \
135
- gr.update(visible=status_ok), gr.update(visible=status_ok), \
136
- gr.update(visible=status_ok), gr.update(visible=status_ok), \
137
- gr.update(visible=status_ok), gr.update(visible=status_ok, choices=[], value=[]), \
138
- gr.update(visible=True, value=operation_log), \
139
- gr.update(visible=status_ok, value=operation_log)
140
 
141
  # get the select frame from gradio slider
142
  def select_template(image_selection_slider, video_state, interactive_state, mask_dropdown):
 
70
  video_path:str
71
  timestamp:float64
72
  Return
73
+ [[0:nearest_frame], [nearest_frame:], nearest_frame]
74
  """
75
  video_path = video_input
76
  frames = []
 
78
  status_ok = True
79
  operation_log = [("[Must Do]", "Click image"), (": Video uploaded! Try to click the image shown in step2 to add masks.\n", None)]
80
  try:
81
+ cap = cv2.VideoCapture(video_path)
82
+ fps = cap.get(cv2.CAP_PROP_FPS)
83
+ length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
84
+
85
+ if length >= 500:
 
 
 
 
86
  operation_log = [("You uploaded a video with more than 500 frames. Stop the video extraction. Kindly lower the video frame rate to a value below 500. We highly recommend deploying the demo locally for long video processing.", "Error")]
87
+ ret, frame = cap.read()
88
+ if ret == True:
89
+ original_h, original_w = frame.shape[:2]
90
+ frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
91
  status_ok = False
 
92
  else:
93
+ while cap.isOpened():
94
+ ret, frame = cap.read()
95
+ if ret == True:
96
+ # resize input image
97
+ original_h, original_w = frame.shape[:2]
98
+ frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
99
+ else:
100
+ break
101
+ t = len(frames)
102
+ if t > 0:
103
+ print(f'Inp video shape: t_{t}, s_{original_h}x{original_w}')
104
+ else:
105
+ print(f'Inp video shape: t_{t}, no input video!!!')
106
+ except (OSError, TypeError, ValueError, KeyError, SyntaxError) as e:
107
  status_ok = False
108
  print("read_frame_source:{} error. {}\n".format(video_path, str(e)))
109
+
110
+ # initialize video_state
111
+ if frames[0].shape[0] > 720 or frames[0].shape[1] > 720:
112
+ operation_log = [(f"Video uploaded! Try to click the image shown in step2 to add masks. (You uploaded a video with a size of {original_w}x{original_h}, and the length of its longest edge exceeds 720 pixels. We may resize the input video during processing.)", "Normal")]
113
 
114
  video_state = {
115
  "user_name": user_name,
116
  "video_name": os.path.split(video_path)[-1],
117
+ "origin_images": frames,
118
+ "painted_images": frames.copy(),
119
+ "masks": [np.zeros((original_h, original_w), np.uint8)]*len(frames),
120
+ "logits": [None]*len(frames),
121
  "select_frame_number": 0,
122
  "fps": fps
123
+ }
124
+ video_info = "Video Name: {},\nFPS: {},\nTotal Frames: {},\nImage Size:{}".format(video_state["video_name"], round(video_state["fps"], 0), length, (original_w, original_h))
 
 
 
125
  model.samcontroler.sam_controler.reset_image()
126
  model.samcontroler.sam_controler.set_image(video_state["origin_images"][0])
127
+ return video_state, video_info, video_state["origin_images"][0], gr.update(visible=status_ok, maximum=len(frames), value=1), gr.update(visible=status_ok, maximum=len(frames), value=len(frames)), \
128
+ gr.update(visible=status_ok), gr.update(visible=status_ok), \
129
+ gr.update(visible=status_ok), gr.update(visible=status_ok),\
130
+ gr.update(visible=status_ok), gr.update(visible=status_ok), \
131
+ gr.update(visible=status_ok), gr.update(visible=status_ok), \
132
+ gr.update(visible=status_ok), gr.update(visible=status_ok), \
133
+ gr.update(visible=status_ok), gr.update(visible=status_ok, choices=[], value=[]), \
134
+ gr.update(visible=True, value=operation_log), gr.update(visible=status_ok, value=operation_log)
 
 
 
 
135
 
136
  # get the select frame from gradio slider
137
  def select_template(image_selection_slider, video_state, interactive_state, mask_dropdown):