lucas-ventura committed on
Commit
f57efa9
·
verified ·
1 Parent(s): 7da7f34

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -85
app.py CHANGED
@@ -1,4 +1,3 @@
1
- import os
2
  import tempfile
3
  from pathlib import Path
4
 
@@ -13,7 +12,6 @@ from src.data.utils_asr import PromptASR
13
  from src.models.llama_inference import inference
14
  from src.test.vidchapters import get_chapters
15
  from tools.download.models import download_base_model, download_model
16
- from tools.download.video import download_url
17
 
18
  # Set up proxies
19
  # from urllib.request import getproxies
@@ -155,80 +153,29 @@ def load_peft(model_name: str = "asr-10k"):
155
  return True
156
 
157
 
158
- def download_from_url(url, output_path):
159
- """Download a video from a URL using yt-dlp and save it to output_path."""
160
- try:
161
- # Import yt-dlp Python package
162
- try:
163
- import yt_dlp
164
- except ImportError:
165
- print("yt-dlp Python package is not installed")
166
- return (
167
- False,
168
- "yt-dlp Python package is not installed. Please install it with 'pip install yt-dlp'.",
169
- )
170
-
171
- # Configure yt-dlp options
172
- ydl_opts = {
173
- "format": "best",
174
- "outtmpl": str(output_path),
175
- "noplaylist": True,
176
- "quiet": True,
177
- }
178
-
179
- # Download the video
180
- with yt_dlp.YoutubeDL(ydl_opts) as ydl:
181
- ydl.download([url])
182
-
183
- # Check if the download was successful
184
- if not os.path.exists(output_path):
185
- return (
186
- False,
187
- "Download completed but video file not found. Please check the URL.",
188
- )
189
-
190
- return True, None
191
- except Exception as e:
192
- error_msg = f"Error downloading video: {str(e)}"
193
- print(error_msg)
194
- return False, error_msg
195
-
196
-
197
  @spaces.GPU
198
- def process_video(
199
- video_file, video_url="", model_name: str = "asr-10k", do_sample: bool = False
200
- ):
201
- """Process a video file or URL and generate chapters."""
202
  progress = gr.Progress()
203
  progress(0, desc="Starting...")
204
 
205
  # Check if we have a valid input
206
- if video_file is None and not video_url:
207
- return "Please upload a video file or provide a URL."
208
 
209
  # Load the PEFT model
210
  progress(0.1, desc=f"Loading LoRA parameters from {model_name}...")
211
  if not load_peft(model_name):
212
  return "Failed to load model. Please try again."
213
 
214
- # Create a temporary directory to save the uploaded or downloaded video
215
  with tempfile.TemporaryDirectory() as temp_dir:
216
  temp_video_path = Path(temp_dir) / "temp_video.mp4"
217
 
218
- if video_file is not None:
219
- # Using uploaded file
220
- progress(0.2, desc="Processing uploaded video...")
221
- with open(temp_video_path, "wb") as f:
222
- f.write(video_file)
223
- else:
224
- # Using URL
225
- progress(0.2, desc=f"Downloading video from URL: {video_url}...")
226
- # success, error_msg = download_from_url(video_url, temp_video_path)
227
- sucess, error_msg, video_paths = download_url(video_url, destinationDirectory=temp_dir)
228
- if not success:
229
- return f"Failed to download video: {error_msg}"
230
- assert len(video_paths) == 1, print(f"video_paths has len={len(video_paths)}")
231
- temp_video_path = video_paths[0]
232
 
233
  # Process the video
234
  progress(0.3, desc="Extracting ASR transcript...")
@@ -299,10 +246,8 @@ note_html = """
299
  # Clone the repository
300
  git clone https://github.com/lucas-ventura/chapter-llama.git
301
  cd chapter-llama
302
-
303
  # Install demo dependencies
304
  python -m pip install -e ".[demo]"
305
-
306
  # Launch the demo
307
  python demo.py</pre>
308
  <p style="font-size: 1.1em; color: #555; margin-bottom: 10px;">If you find any issues, please report them on our <a href="https://github.com/lucas-ventura/chapter-llama/issues" style="color: #8F68C3; text-decoration: none;">GitHub repository</a>.</p>
@@ -340,19 +285,6 @@ with gr.Blocks(title="Chapter-Llama", head=head) as demo:
340
  file_types=["video", "audio"],
341
  type="binary",
342
  )
343
- #with gr.Tab("Upload File"):
344
- # video_input = gr.File(
345
- # label="Upload Video or Audio File",
346
- # file_types=["video", "audio"],
347
- # type="binary",
348
- # )
349
-
350
- #with gr.Tab("Video URL"):
351
- # video_url_input = gr.Textbox(
352
- # label="YouTube or Video URL",
353
- # placeholder="https://youtube.com/watch?v=...",
354
- # )
355
- video_url_input = ""
356
 
357
  model_dropdown = gr.Dropdown(
358
  choices=["asr-10k", "asr-1k"],
@@ -367,19 +299,18 @@ with gr.Blocks(title="Chapter-Llama", head=head) as demo:
367
  with gr.Column():
368
  status_area = gr.Markdown("**Status:** Ready to process video")
369
  output_text = gr.Textbox(
370
- label="Generated Chapters", lines=12, interactive=False
371
  )
372
 
373
-
374
- def update_status_and_process(video_file, video_url, model_name, do_sample):
375
- if video_file is None and not video_url:
376
  return (
377
- "**Status:** No video uploaded or URL provided",
378
- "Please upload a video file or provide a URL.",
379
  )
380
  else:
381
  return "**Status:** Processing video...", process_video(
382
- video_file, video_url, model_name, do_sample
383
  )
384
 
385
  # Load the base model at startup
@@ -397,4 +328,4 @@ with gr.Blocks(title="Chapter-Llama", head=head) as demo:
397
 
398
  if __name__ == "__main__":
399
  # Launch the Gradio app
400
- demo.launch()
 
 
1
  import tempfile
2
  from pathlib import Path
3
 
 
12
  from src.models.llama_inference import inference
13
  from src.test.vidchapters import get_chapters
14
  from tools.download.models import download_base_model, download_model
 
15
 
16
  # Set up proxies
17
  # from urllib.request import getproxies
 
153
  return True
154
 
155
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
156
  @spaces.GPU
157
+ def process_video(video_file, model_name: str = "asr-10k", do_sample: bool = False):
158
+ """Process a video file and generate chapters."""
 
 
159
  progress = gr.Progress()
160
  progress(0, desc="Starting...")
161
 
162
  # Check if we have a valid input
163
+ if video_file is None:
164
+ return "Please upload a video file."
165
 
166
  # Load the PEFT model
167
  progress(0.1, desc=f"Loading LoRA parameters from {model_name}...")
168
  if not load_peft(model_name):
169
  return "Failed to load model. Please try again."
170
 
171
+ # Create a temporary directory to save the uploaded video
172
  with tempfile.TemporaryDirectory() as temp_dir:
173
  temp_video_path = Path(temp_dir) / "temp_video.mp4"
174
 
175
+ # Using uploaded file
176
+ progress(0.2, desc="Processing uploaded video...")
177
+ with open(temp_video_path, "wb") as f:
178
+ f.write(video_file)
 
 
 
 
 
 
 
 
 
 
179
 
180
  # Process the video
181
  progress(0.3, desc="Extracting ASR transcript...")
 
246
  # Clone the repository
247
  git clone https://github.com/lucas-ventura/chapter-llama.git
248
  cd chapter-llama
 
249
  # Install demo dependencies
250
  python -m pip install -e ".[demo]"
 
251
  # Launch the demo
252
  python demo.py</pre>
253
  <p style="font-size: 1.1em; color: #555; margin-bottom: 10px;">If you find any issues, please report them on our <a href="https://github.com/lucas-ventura/chapter-llama/issues" style="color: #8F68C3; text-decoration: none;">GitHub repository</a>.</p>
 
285
  file_types=["video", "audio"],
286
  type="binary",
287
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
288
 
289
  model_dropdown = gr.Dropdown(
290
  choices=["asr-10k", "asr-1k"],
 
299
  with gr.Column():
300
  status_area = gr.Markdown("**Status:** Ready to process video")
301
  output_text = gr.Textbox(
302
+ label="Generated Chapters", lines=10, interactive=False
303
  )
304
 
305
+ def update_status_and_process(video_file, model_name, do_sample):
306
+ if video_file is None:
 
307
  return (
308
+ "**Status:** No video uploaded",
309
+ "Please upload a video file.",
310
  )
311
  else:
312
  return "**Status:** Processing video...", process_video(
313
+ video_file, model_name, do_sample
314
  )
315
 
316
  # Load the base model at startup
 
328
 
329
  if __name__ == "__main__":
330
  # Launch the Gradio app
331
+ demo.launch()