Deepsheka committed
Commit 9c4cffb · 1 Parent(s): b7abe4e

Update app.py

Files changed (1)
  1. app.py +302 -2
app.py CHANGED
@@ -1,7 +1,14 @@
 import gradio as gr
 from pytube import YouTube
 import whisper
-
+import json
+from difflib import Differ
+import ffmpeg
+import os
+from pathlib import Path
+import time
+import aiohttp
+import asyncio
 # define function for transcription
 def whisper_transcript(model_size,audio_file):
     if url:
@@ -53,4 +60,297 @@ gradio_ui = gr.Interface(
     outputs=gr.outputs.Textbox(label="Whisper Transcript"),
 )
 
-gradio_ui.queue().launch()
+gradio_ui.queue().launch()
+
+
+
+
+# Set to True if you're using the Hugging Face Inference API https://huggingface.co/inference-api
+API_BACKEND = True
+# MODEL = 'facebook/wav2vec2-large-960h-lv60-self'
+# MODEL = "facebook/wav2vec2-large-960h"
+MODEL = "facebook/wav2vec2-base-960h"
+# MODEL = "patrickvonplaten/wav2vec2-large-960h-lv60-self-4-gram"
+if API_BACKEND:
+    from dotenv import load_dotenv
+    import base64
+    import asyncio
+    load_dotenv(Path(".env"))
+
+    HF_TOKEN = os.environ["HF_TOKEN"]
+    headers = {"Authorization": f"Bearer {HF_TOKEN}"}
+    API_URL = f'https://api-inference.huggingface.co/models/{MODEL}'
+
+else:
+    import torch
+    from transformers import pipeline
+
+    # is CUDA available?
+    cuda = torch.device(
+        'cuda:0') if torch.cuda.is_available() else torch.device('cpu')
+    device = 0 if torch.cuda.is_available() else -1
+    speech_recognizer = pipeline(
+        task="automatic-speech-recognition",
+        model=f'{MODEL}',
+        tokenizer=f'{MODEL}',
+        framework="pt",
+        device=device,
+    )
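For reference, the API branch above assumes an HF_TOKEN is available before os.environ["HF_TOKEN"] is read, supplied either as a real environment variable or through a .env file picked up by load_dotenv. A minimal sketch of that assumption, with a placeholder value:

# sketch only: the .env file next to app.py is assumed to hold a single line like
#   HF_TOKEN=hf_xxxxxxxxxxxxxxxx   (placeholder, not a real token)
# which load_dotenv(Path(".env")) exports before any Inference API call is made
import os
assert "HF_TOKEN" in os.environ, "set HF_TOKEN or add it to .env"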
+
+videos_out_path = Path("./videos_out")
+videos_out_path.mkdir(parents=True, exist_ok=True)
+
+samples_data = sorted(Path('examples').glob('*.json'))
+SAMPLES = []
+for file in samples_data:
+    with open(file) as f:
+        sample = json.load(f)
+    SAMPLES.append(sample)
+VIDEOS = list(map(lambda x: [x['video']], SAMPLES))
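For reference, each examples/*.json file is assumed to carry a video path, its transcription, and per-character timestamps, matching the keys that load_example() reads further down and the [char, start, end] shape consumed by cut_timestamps_to_video. A minimal sketch with placeholder values:

# sketch only: the assumed shape of one entry in examples/*.json (placeholder values)
sample = {
    "video": "examples/cooking.mp4",        # path served by the examples Dataset
    "transcription": "hi there",            # lower-cased text shown in the textbox
    "timestamps": [["h", 0.51, 0.53], ["i", 0.53, 0.55]],  # truncated; one [char, start, end] per character
}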
+
+total_inferences_since_reboot = 415
+total_cuts_since_reboot = 1539
+
+
+async def speech_to_text(video_file_path):
+    """
+    Takes a video path, converts the video to audio, and transcribes the audio channel
+    to text plus character timestamps,
+    using the https://huggingface.co/tasks/automatic-speech-recognition pipeline
+    """
+    global total_inferences_since_reboot
+    if(video_file_path == None):
+        raise ValueError("Error no video input")
+
+    video_path = Path(video_file_path)
+    try:
+        # convert video to 16 kHz mono audio, piped into audio_memory
+        audio_memory, _ = ffmpeg.input(video_path).output(
+            '-', format="wav", ac=1, ar='16k').overwrite_output().global_args('-loglevel', 'quiet').run(capture_stdout=True)
+    except Exception as e:
+        raise RuntimeError("Error converting video to audio")
+
+    ping("speech_to_text")
+    last_time = time.time()
+    if API_BACKEND:
+        # Using the Inference API https://huggingface.co/inference-api
+        # retry a few times, because the model may still be loading
+        for i in range(10):
+            for tries in range(4):
+                print(f'Transcribing from API attempt {tries}')
+                try:
+                    inference_reponse = await query_api(audio_memory)
+                    transcription = inference_reponse["text"].lower()
+                    timestamps = [[chunk["text"].lower(), chunk["timestamp"][0], chunk["timestamp"][1]]
+                                  for chunk in inference_reponse['chunks']]
+
+                    total_inferences_since_reboot += 1
+                    print("\n\ntotal_inferences_since_reboot: ",
+                          total_inferences_since_reboot, "\n\n")
+                    return (transcription, transcription, timestamps)
+                except:
+                    if 'error' in inference_reponse and 'estimated_time' in inference_reponse:
+                        wait_time = inference_reponse['estimated_time']
+                        print("Waiting for model to load....", wait_time)
+                        # wait for the model to load, plus 5 seconds for certainty
+                        await asyncio.sleep(wait_time + 5.0)
+                    elif 'error' in inference_reponse:
+                        raise RuntimeError("Error Fetching API",
+                                           inference_reponse['error'])
+                    else:
+                        break
+        else:
+            raise RuntimeError(inference_reponse, "Error Fetching API")
+    else:
+
+        try:
+            print(f'Transcribing via local model')
+            output = speech_recognizer(
+                audio_memory, return_timestamps="char", chunk_length_s=10, stride_length_s=(4, 2))
+
+            transcription = output["text"].lower()
+            timestamps = [[chunk["text"].lower(), chunk["timestamp"][0].tolist(), chunk["timestamp"][1].tolist()]
+                          for chunk in output['chunks']]
+            total_inferences_since_reboot += 1
+
+            print("\n\ntotal_inferences_since_reboot: ",
+                  total_inferences_since_reboot, "\n\n")
+            return (transcription, transcription, timestamps)
+        except Exception as e:
+            raise RuntimeError("Error Running inference with local model", e)
+
+
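Both backends above are parsed the same way: a dict with the full text plus a list of per-character chunks when return_timestamps="char" is requested. A minimal sketch of how the [char, start, end] triples are derived from that shape (the text and timings are made up):

# sketch only: illustrative pipeline-style output with placeholder values
example_output = {
    "text": "HI",
    "chunks": [
        {"text": "H", "timestamp": (0.50, 0.52)},
        {"text": "I", "timestamp": (0.52, 0.54)},
    ],
}
timestamps = [[chunk["text"].lower(), chunk["timestamp"][0], chunk["timestamp"][1]]
              for chunk in example_output["chunks"]]
print(timestamps)  # [['h', 0.5, 0.52], ['i', 0.52, 0.54]]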
+async def cut_timestamps_to_video(video_in, transcription, text_in, timestamps):
+    """
+    Given the original video input, the text transcription + timestamps,
+    and the edited text, cuts the video segments into a single video
+    """
+    global total_cuts_since_reboot
+
+    video_path = Path(video_in)
+    video_file_name = video_path.stem
+    if(video_in == None or text_in == None or transcription == None):
+        raise ValueError("Inputs undefined")
+
+    d = Differ()
+    # compare original transcription with edited text
+    diff_chars = d.compare(transcription, text_in)
+    # remove all text additions from the diff
+    filtered = list(filter(lambda x: x[0] != '+', diff_chars))
+
+    # filter timestamps to be removed
+    # timestamps_to_cut = [b for (a,b) in zip(filtered, timestamps_var) if a[0]== '-' ]
+    # return diff tokens and the cut video!!
+
+    # group character timestamps so there are fewer cuts
+    idx = 0
+    grouped = {}
+    for (a, b) in zip(filtered, timestamps):
+        if a[0] != '-':
+            if idx in grouped:
+                grouped[idx].append(b)
+            else:
+                grouped[idx] = []
+                grouped[idx].append(b)
+        else:
+            idx += 1
+
+    # after grouping, take the start of the first and the end of the last timestamp in each group
+    timestamps_to_cut = [[v[0][1], v[-1][2]] for v in grouped.values()]
+
+    between_str = '+'.join(
+        map(lambda t: f'between(t,{t[0]},{t[1]})', timestamps_to_cut))
+
+    if timestamps_to_cut:
+        video_file = ffmpeg.input(video_in)
+        video = video_file.video.filter(
+            "select", f'({between_str})').filter("setpts", "N/FRAME_RATE/TB")
+        audio = video_file.audio.filter(
+            "aselect", f'({between_str})').filter("asetpts", "N/SR/TB")
+
+        output_video = f'./videos_out/{video_file_name}.mp4'
+        ffmpeg.concat(video, audio, v=1, a=1).output(
+            output_video).overwrite_output().global_args('-loglevel', 'quiet').run()
+    else:
+        output_video = video_in
+
+    tokens = [(token[2:], token[0] if token[0] != " " else None)
+              for token in filtered]
+
+    total_cuts_since_reboot += 1
+    ping("video_cuts")
+    print("\n\ntotal_cuts_since_reboot: ", total_cuts_since_reboot, "\n\n")
+    return (tokens, output_video)
+
+
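A minimal sketch of the cut logic above on made-up strings: Differ.compare() yields one two-character-prefixed token per character ('- ' removed, '+ ' added, '  ' unchanged); additions are dropped, surviving characters between deletions are grouped, and each group's start/end times become a between(t,start,end) term in the ffmpeg select expression:

from difflib import Differ

# placeholder transcription, per-character timestamps, and an edit that deletes " world"
transcription = "hi world"
timestamps = [[c, i * 0.1, (i + 1) * 0.1] for i, c in enumerate(transcription)]
text_in = "hi"

filtered = [t for t in Differ().compare(transcription, text_in) if t[0] != '+']

idx, grouped = 0, {}
for (a, b) in zip(filtered, timestamps):
    if a[0] != '-':
        grouped.setdefault(idx, []).append(b)   # same grouping idea as above
    else:
        idx += 1

timestamps_to_cut = [[v[0][1], v[-1][2]] for v in grouped.values()]
between_str = '+'.join(f'between(t,{t[0]},{t[1]})' for t in timestamps_to_cut)
print(timestamps_to_cut)  # [[0.0, 0.2]] -> keep only "hi"
print(between_str)        # between(t,0.0,0.2)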
+async def query_api(audio_bytes: bytes):
+    """
+    Query the Hugging Face Inference API for the automatic speech recognition task
+    """
+    payload = json.dumps({
+        "inputs": base64.b64encode(audio_bytes).decode("utf-8"),
+        "parameters": {
+            "return_timestamps": "char",
+            "chunk_length_s": 10,
+            "stride_length_s": [4, 2]
+        },
+        "options": {"use_gpu": False}
+    }).encode("utf-8")
+    async with aiohttp.ClientSession() as session:
+        async with session.post(API_URL, headers=headers, data=payload) as response:
+            return await response.json()
+
+
+def ping(name):
+    url = f'https://huggingface.co/api/telemetry/spaces/radames/edit-video-by-editing-text/{name}'
+    print("ping: ", url)
+
+    async def req():
+        async with aiohttp.ClientSession() as session:
+            async with session.get(url) as response:
+                print("pong: ", response.status)
+    asyncio.create_task(req())
+
+
+# ---- Gradio Layout -----
+video_in = gr.Video(label="Video file")
+text_in = gr.Textbox(label="Transcription", lines=10, interactive=True)
+video_out = gr.Video(label="Video Out")
+diff_out = gr.HighlightedText(label="Cuts Diffs", combine_adjacent=True)
+examples = gr.components.Dataset(
+    components=[video_in], samples=VIDEOS, type="index")
+
+demo = gr.Blocks(enable_queue=True, css='''
+    #cut_btn, #reset_btn { align-self:stretch; }
+    #\\31 3 { max-width: 540px; }
+    .output-markdown {max-width: 65ch !important;}
+''')
+demo.encrypt = False
+with demo:
+    transcription_var = gr.Variable()
+    timestamps_var = gr.Variable()
+    with gr.Row():
+        with gr.Column():
+            gr.Markdown('''
+            # Edit Video By Editing Text
+            This project is a quick proof of concept of a simple video editor where the edits
+            are made by editing the audio transcription.
+            Using the [Huggingface Automatic Speech Recognition Pipeline](https://huggingface.co/tasks/automatic-speech-recognition)
+            with a fine-tuned [Wav2Vec2 model using Connectionist Temporal Classification (CTC)](https://huggingface.co/facebook/wav2vec2-large-960h-lv60-self),
+            you can predict not only the text transcription but also the [character- or word-based timestamps](https://huggingface.co/docs/transformers/v4.19.2/en/main_classes/pipelines#transformers.AutomaticSpeechRecognitionPipeline.__call__.return_timestamps)
+            ''')
+
+    with gr.Row():
+
+        examples.render()
+
+        def load_example(id):
+            video = SAMPLES[id]['video']
+            transcription = SAMPLES[id]['transcription'].lower()
+            timestamps = SAMPLES[id]['timestamps']
+
+            return (video, transcription, transcription, timestamps)
+
+        examples.click(
+            load_example,
+            inputs=[examples],
+            outputs=[video_in, text_in, transcription_var, timestamps_var],
+            queue=False)
+    with gr.Row():
+        with gr.Column():
+            video_in.render()
+            transcribe_btn = gr.Button("Transcribe Audio")
+            transcribe_btn.click(speech_to_text, [video_in], [
+                text_in, transcription_var, timestamps_var])
+
+    with gr.Row():
+        gr.Markdown('''
+        ### Now edit as text
+        After running the video transcription, you can make cuts to the text below (only cuts, not additions!)''')
+
+    with gr.Row():
+        with gr.Column():
+            text_in.render()
+            with gr.Row():
+                cut_btn = gr.Button("Cut to video", elem_id="cut_btn")
+                # send audio path and hidden variables
+                cut_btn.click(cut_timestamps_to_video, [
+                    video_in, transcription_var, text_in, timestamps_var], [diff_out, video_out])
+
+                reset_transcription = gr.Button(
+                    "Reset to last transcription", elem_id="reset_btn")
+                reset_transcription.click(
+                    lambda x: x, transcription_var, text_in)
+        with gr.Column():
+            video_out.render()
+            diff_out.render()
+    with gr.Row():
+        gr.Markdown('''
+        #### Video Credits
+        1. [Cooking](https://vimeo.com/573792389)
+        1. [Shia LaBeouf "Just Do It"](https://www.youtube.com/watch?v=n2lTxIk_Dr0)
+        1. [Mark Zuckerberg & Yuval Noah Harari in Conversation](https://www.youtube.com/watch?v=Boj9eD0Wug8)
+        ''')
+
+if __name__ == "__main__":
+    demo.launch(debug=True)