Samuelblue committed
Commit 8abc43f · verified · 1 Parent(s): 267bd4b

Create app.py

Files changed (1)
  app.py +309 -0
app.py ADDED
@@ -0,0 +1,309 @@
import gradio as gr
import json
from difflib import Differ
import ffmpeg
import os
from pathlib import Path
import time
import aiohttp
import asyncio

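# The imports above imply these third-party packages: gradio, aiohttp, an ffmpeg
# binding exposing input()/output()/run() (ffmpeg-python, which also needs an
# ffmpeg binary on PATH), plus python-dotenv or torch/transformers depending on
# the API_BACKEND setting below.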
# Set True if you're using the Hugging Face Inference API https://huggingface.co/inference-api
API_BACKEND = True
# MODEL = 'facebook/wav2vec2-large-960h-lv60-self'
# MODEL = "facebook/wav2vec2-large-960h"
MODEL = "facebook/wav2vec2-base-960h"
# MODEL = "patrickvonplaten/wav2vec2-large-960h-lv60-self-4-gram"
if API_BACKEND:
    from dotenv import load_dotenv
    import base64
    load_dotenv(Path(".env"))
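    # .env is expected to define HF_TOKEN (e.g. HF_TOKEN=hf_xxxx, a placeholder
    # value); it is read below and sent as a Bearer token to the Inference API.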

    HF_TOKEN = os.environ["HF_TOKEN"]
    headers = {"Authorization": f"Bearer {HF_TOKEN}"}
    API_URL = f'https://api-inference.huggingface.co/models/{MODEL}'

else:
    import torch
    from transformers import pipeline

    # is CUDA available?
    cuda = torch.device(
        'cuda:0') if torch.cuda.is_available() else torch.device('cpu')
    device = 0 if torch.cuda.is_available() else -1
    speech_recognizer = pipeline(
        task="automatic-speech-recognition",
        model=f'{MODEL}',
        tokenizer=f'{MODEL}',
        framework="pt",
        device=device,
    )

videos_out_path = Path("./videos_out")
videos_out_path.mkdir(parents=True, exist_ok=True)

samples_data = sorted(Path('examples').glob('*.json'))
SAMPLES = []
for file in samples_data:
    with open(file) as f:
        sample = json.load(f)
    SAMPLES.append(sample)
VIDEOS = list(map(lambda x: [x['video']], SAMPLES))
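# Each examples/*.json is assumed to carry at least "video", "transcription" and
# "timestamps" ([[char, start_s, end_s], ...]) keys, matching how load_example()
# reads SAMPLES in the Gradio layout below.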

total_inferences_since_reboot = 415
total_cuts_since_reboot = 1539


async def speech_to_text(video_file_path):
    """
    Takes a video path, extracts the audio track, and transcribes it to text plus
    character-level timestamps, returning (transcription, transcription, timestamps).

    Uses the https://huggingface.co/tasks/automatic-speech-recognition pipeline.
    """
    global total_inferences_since_reboot
    if video_file_path is None:
        raise ValueError("Error: no video input")

    video_path = Path(video_file_path)
    try:
        # convert the video to 16 kHz mono wav audio, piped into memory (audio_memory)
        audio_memory, _ = ffmpeg.input(video_path).output(
            '-', format="wav", ac=1, ar='16k').overwrite_output().global_args('-loglevel', 'quiet').run(capture_stdout=True)
    except Exception as e:
        raise RuntimeError("Error converting video to audio") from e

    ping("speech_to_text")
    last_time = time.time()
    if API_BACKEND:
        # Using Inference API https://huggingface.co/inference-api
        # retry a few times, because the model may still be loading
        for i in range(10):
            for tries in range(4):
                print(f'Transcribing from API attempt {tries}')
                try:
                    inference_response = await query_api(audio_memory)
                    print(inference_response)
                    transcription = inference_response["text"].lower()
                    timestamps = [[chunk["text"].lower(), chunk["timestamp"][0], chunk["timestamp"][1]]
                                  for chunk in inference_response['chunks']]

                    total_inferences_since_reboot += 1
                    print("\n\ntotal_inferences_since_reboot: ",
                          total_inferences_since_reboot, "\n\n")
                    return (transcription, transcription, timestamps)
                except Exception as e:
                    print(e)
                    if 'error' in inference_response and 'estimated_time' in inference_response:
                        wait_time = inference_response['estimated_time']
                        print("Waiting for model to load....", wait_time)
                        # wait for the model to load, plus 5 seconds for good measure
                        await asyncio.sleep(wait_time + 5.0)
                    elif 'error' in inference_response:
                        raise RuntimeError("Error Fetching API",
                                           inference_response['error'])
                    else:
                        break
            else:
                raise RuntimeError(inference_response, "Error Fetching API")
    else:

        try:
            print('Transcribing via local model')
            output = speech_recognizer(
                audio_memory, return_timestamps="char", chunk_length_s=10, stride_length_s=(4, 2))

            transcription = output["text"].lower()
            timestamps = [[chunk["text"].lower(), chunk["timestamp"][0].tolist(), chunk["timestamp"][1].tolist()]
                          for chunk in output['chunks']]
            total_inferences_since_reboot += 1

            print("\n\ntotal_inferences_since_reboot: ",
                  total_inferences_since_reboot, "\n\n")
            return (transcription, transcription, timestamps)
        except Exception as e:
            raise RuntimeError("Error Running inference with local model", e)


async def cut_timestamps_to_video(video_in, transcription, text_in, timestamps):
    """
    Given the original video input, the transcription with its character timestamps,
    and the edited text, cuts the kept video segments into a single output video.
    """
    global total_cuts_since_reboot

    video_path = Path(video_in)
    video_file_name = video_path.stem
    if video_in is None or text_in is None or transcription is None:
        raise ValueError("Inputs undefined")

    d = Differ()
    # compare the original transcription with the edited text, character by character
    diff_chars = d.compare(transcription, text_in)

    # keep only characters that have timestamps: unchanged chars (' ') and deletions
    # ('-') come from the original transcription; additions ('+') from the edited
    # text have no timestamps, so they are dropped (the UI only supports cuts)
    filtered = list(filter(lambda x: x[0] != '+', diff_chars))

    # group the timestamps of consecutive kept characters; a deleted character
    # starts a new group, so each group is one contiguous segment to keep
    idx = 0
    grouped = {}
    for (a, b) in zip(filtered, timestamps):
        if a[0] != '-':
            if idx in grouped:
                grouped[idx].append(b)
            else:
                grouped[idx] = [b]
        else:
            idx += 1
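    # Illustrative example (hypothetical values): transcription "hi you", edited text
    # "hi" -> the chars "h" and "i" land in group 0, the deleted " you" chars only
    # bump idx, so grouped holds one list of timestamps covering just the kept segment.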

    # after grouping, take the start of the first and the end of the last timestamp
    # in each group, i.e. one (start, end) range per segment to keep
    timestamps_to_cut = [[v[0][1], v[-1][2]] for v in grouped.values()]

    between_str = '+'.join(
        map(lambda t: f'between(t,{t[0]},{t[1]})', timestamps_to_cut))
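    # e.g. timestamps_to_cut = [[0.0, 1.5], [3.2, 4.0]] (illustrative values) yields
    # between_str = 'between(t,0.0,1.5)+between(t,3.2,4.0)', which the select/aselect
    # filters below use to keep only frames/samples inside those time ranges.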

    if timestamps_to_cut:
        video_file = ffmpeg.input(video_in)
        video = video_file.video.filter(
            "select", f'({between_str})').filter("setpts", "N/FRAME_RATE/TB")
        audio = video_file.audio.filter(
            "aselect", f'({between_str})').filter("asetpts", "N/SR/TB")

        output_video = f'./videos_out/{video_file_name}.mp4'
        ffmpeg.concat(video, audio, v=1, a=1).output(
            output_video).overwrite_output().global_args('-loglevel', 'quiet').run()
    else:
        output_video = video_in

    tokens = [(token[2:], token[0] if token[0] != " " else None)
              for token in filtered]

    total_cuts_since_reboot += 1
    ping("video_cuts")
    print("\n\ntotal_cuts_since_reboot: ", total_cuts_since_reboot, "\n\n")
    return (tokens, output_video)


async def query_api(audio_bytes: bytes):
    """
    Query the Hugging Face Inference API for the automatic speech recognition task.
    """
    payload = json.dumps({
        "inputs": base64.b64encode(audio_bytes).decode("utf-8"),
        "parameters": {
            "return_timestamps": "char",
            "chunk_length_s": 10,
            "stride_length_s": [4, 2]
        },
        "options": {"use_gpu": False}
    }).encode("utf-8")
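    # The JSON response is expected to look like
    # {"text": "...", "chunks": [{"text": "a", "timestamp": [0.1, 0.2]}, ...]},
    # inferred from how speech_to_text() consumes it above.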
    async with aiohttp.ClientSession() as session:
        async with session.post(API_URL, headers=headers, data=payload) as response:
            print("API Response: ", response.status)
            if response.headers['Content-Type'] == 'application/json':
                return await response.json()
            elif response.headers['Content-Type'] == 'application/octet-stream':
                return await response.read()
            elif response.headers['Content-Type'] == 'text/plain':
                return await response.text()
            else:
                raise RuntimeError("Error Fetching API")


def ping(name):
    url = f'https://huggingface.co/api/telemetry/spaces/radames/edit-video-by-editing-text/{name}'
    print("ping: ", url)

    async def req():
        async with aiohttp.ClientSession() as session:
            async with session.get(url) as response:
                print("pong: ", response.status)
    asyncio.create_task(req())
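    # asyncio.create_task() requires a running event loop; that holds here because
    # ping() is only called from the async Gradio handlers defined above.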


# ---- Gradio Layout -----
video_in = gr.Video(label="Video file", elem_id="video-container")
text_in = gr.Textbox(label="Transcription", lines=10, interactive=True)
video_out = gr.Video(label="Video Out")
diff_out = gr.HighlightedText(label="Cuts Diffs", combine_adjacent=True)
examples = gr.Dataset(components=[video_in], samples=VIDEOS, type="index")
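# type="index" means examples.click passes the selected sample's index, which
# load_example() below uses to look up SAMPLES[id].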

css = """
#cut_btn, #reset_btn { align-self:stretch; }
#\\31 3 { max-width: 540px; }
.output-markdown {max-width: 65ch !important;}
#video-container{
    max-width: 40rem;
}
"""
with gr.Blocks(css=css) as demo:
    transcription_var = gr.State()
    timestamps_var = gr.State()
    with gr.Row():
        with gr.Column():
            gr.Markdown("""
            # Edit Video By Editing Text
            This project is a quick proof of concept of a simple video editor where the edits
            are made by editing the audio transcription.
            Using the [Huggingface Automatic Speech Recognition Pipeline](https://huggingface.co/tasks/automatic-speech-recognition)
            with a fine-tuned [Wav2Vec2 model using Connectionist Temporal Classification (CTC)](https://huggingface.co/facebook/wav2vec2-large-960h-lv60-self),
            you can predict not only the text transcription but also the [character- or word-based timestamps](https://huggingface.co/docs/transformers/v4.19.2/en/main_classes/pipelines#transformers.AutomaticSpeechRecognitionPipeline.__call__.return_timestamps).
            """)

    with gr.Row():

        examples.render()

        def load_example(id):
            video = SAMPLES[id]['video']
            transcription = SAMPLES[id]['transcription'].lower()
            timestamps = SAMPLES[id]['timestamps']

            return (video, transcription, transcription, timestamps)

        examples.click(
            load_example,
            inputs=[examples],
            outputs=[video_in, text_in, transcription_var, timestamps_var],
            queue=False)
    with gr.Row():
        with gr.Column():
            video_in.render()
            transcribe_btn = gr.Button("Transcribe Audio")
            transcribe_btn.click(speech_to_text, [video_in], [
                text_in, transcription_var, timestamps_var])

    with gr.Row():
        gr.Markdown("""
        ### Now edit as text
        After running the video transcription, you can make cuts to the text below (only cuts, not additions!)""")

    with gr.Row():
        with gr.Column():
            text_in.render()
            with gr.Row():
                cut_btn = gr.Button("Cut to video", elem_id="cut_btn")
                # send the video path and the hidden state variables
                cut_btn.click(cut_timestamps_to_video, [
                    video_in, transcription_var, text_in, timestamps_var], [diff_out, video_out])

                reset_transcription = gr.Button(
                    "Reset to last transcription", elem_id="reset_btn")
                reset_transcription.click(
                    lambda x: x, transcription_var, text_in)
        with gr.Column():
            video_out.render()
            diff_out.render()
    with gr.Row():
        gr.Markdown("""
        #### Video Credits

        1. [Cooking](https://vimeo.com/573792389)
        1. [Shia LaBeouf "Just Do It"](https://www.youtube.com/watch?v=n2lTxIk_Dr0)
        1. [Mark Zuckerberg & Yuval Noah Harari in Conversation](https://www.youtube.com/watch?v=Boj9eD0Wug8)
        """)
demo.queue()
if __name__ == "__main__":
    demo.launch(debug=True)