Ahmadkhan12 committed on
Commit 1b1d357 · verified · 1 Parent(s): f0ab573

Update app.py

Files changed (1)
  1. app.py +39 -55
app.py CHANGED
@@ -2,15 +2,12 @@ import gradio as gr
 import moviepy.editor as mp
 import numpy as np
 from PIL import Image
+import tempfile
 import os
 import traceback
-import sys
-
-# Redirect logs for Hugging Face
-sys.stdout = sys.stderr
 
 # Resize image while maintaining aspect ratio
-def resize_and_fit_image(img, target_size=(1280, 720), padding_color=(0, 0, 0)):
+def resize_image_with_aspect_ratio(img, target_size=(1280, 720), padding_color=(0, 0, 0)):
     width, height = img.size
     target_width, target_height = target_size
 
@@ -26,91 +23,78 @@ def resize_and_fit_image(img, target_size=(1280, 720), padding_color=(0, 0, 0)):
 
     img_resized = img.resize((new_width, new_height))
     final_img = Image.new('RGB', target_size, padding_color)
-    final_img.paste(img_resized, ((target_width - new_width) // 2, (target_height - new_height) // 2))
+    padding_left = (target_width - new_width) // 2
+    padding_top = (target_height - new_height) // 2
+    final_img.paste(img_resized, (padding_left, padding_top))
 
     return final_img
 
-# Apply zoom effect to image clip
-def apply_zoom_effect(image_clip):
-    return image_clip.resize(lambda t: 1 + 0.05 * t)  # Gradual zoom effect
-
 # Video generation function
 def process_and_generate_video(audio_file, images):
+    debug_log = []  # Log for debugging
+
     try:
-        print("Starting video generation...")
-
-        # Validate input files
-        if not audio_file:
-            raise ValueError("No audio file provided.")
-        if not images or len(images) == 0:
-            raise ValueError("No images provided.")
-
-        # Handle audio file path
-        if isinstance(audio_file, str):
-            audio_path = audio_file
-        else:
-            raise ValueError("Audio file is not a valid file path.")
-
-        # Load audio
-        print(f"Audio file: {audio_path}")
-        audio = mp.AudioFileClip(audio_path)
+        # Validate inputs
+        if not audio_file or not images:
+            raise ValueError("Both audio and images are required for video generation.")
+
+        # Log received inputs
+        debug_log.append(f"Received audio file: {audio_file}")
+        debug_log.append(f"Received images: {[img for img in images]}")
+
+        # Load audio file
+        audio = mp.AudioFileClip(audio_file)
         audio_duration = audio.duration
-        print(f"Audio duration: {audio_duration} seconds")
+        debug_log.append(f"Audio duration: {audio_duration:.2f} seconds")
 
         # Process images
         image_clips = []
         image_duration = audio_duration / len(images)
-
         for img_path in images:
-            print(f"Processing image: {img_path}")
-            if isinstance(img_path, str):
-                img = Image.open(img_path)
-            else:
-                raise ValueError("Image file is not a valid file path.")
-
-            img = resize_and_fit_image(img, target_size=(1280, 720))
-            img_clip = mp.ImageClip(np.array(img)).set_duration(image_duration).set_fps(30)
-            img_clip = apply_zoom_effect(img_clip)
+            debug_log.append(f"Processing image: {img_path}")
+            img = Image.open(img_path)
+            resized_img = resize_image_with_aspect_ratio(img, target_size=(1280, 720))
+            img_clip = mp.ImageClip(np.array(resized_img)).set_duration(image_duration).set_fps(24)
             image_clips.append(img_clip)
 
-        print(f"Generated {len(image_clips)} image clips.")
+        debug_log.append(f"Created {len(image_clips)} image clips")
 
         # Concatenate clips
         video = mp.concatenate_videoclips(image_clips, method="compose")
         video = video.set_audio(audio)
 
-        # Save video
+        # Save video to a temporary path
         output_path = "/tmp/generated_video.mp4"
-        print(f"Saving video to {output_path}...")
         video.write_videofile(output_path, codec="libx264", audio_codec="aac")
+        debug_log.append(f"Video successfully saved to {output_path}")
 
-        print("Video generation completed successfully.")
-        return output_path
+        return output_path, "\n".join(debug_log)
 
     except Exception as e:
-        error_message = f"Error during video generation: {e}\nTraceback:\n{traceback.format_exc()}"
-        print(error_message)  # Log the detailed error
-        return error_message  # Return error as feedback
+        error_message = f"Error generating video: {str(e)}"
+        debug_log.append(error_message)
+        debug_log.append(traceback.format_exc())
+        return None, "\n".join(debug_log)
 
 # Gradio interface setup
 def gradio_interface():
     with gr.Blocks() as demo:
         with gr.Row():
             with gr.Column():
-                mp3_input = gr.Audio(type="filepath", label="Upload MP3")
-                image_input = gr.File(type="filepath", file_types=[".jpg", ".png"], label="Upload Images", file_count="multiple")
-                generate_button = gr.Button("Generate Video")
-
-                output_video = gr.Video(label="Generated Video")
-                error_output = gr.Textbox(label="Error Log", interactive=False)
+                mp3_input = gr.Audio(type="filepath", label="Upload MP3")  # MP3 input
+                image_input = gr.File(type="filepath", file_types=[".jpg", ".png"], label="Upload Images", file_count="multiple")  # Image inputs
+                generate_button = gr.Button("Generate Video")  # Button to generate video
+
+                output_video = gr.Video(label="Generated Video")  # Display the output video
+                debug_logs = gr.Textbox(label="Debug Logs", interactive=False)  # Display debug logs
 
         generate_button.click(
             fn=process_and_generate_video,
             inputs=[mp3_input, image_input],
-            outputs=[output_video, error_output]
+            outputs=[output_video, debug_logs]
         )
-
+
     demo.launch()
 
-# Run the Gradio interface
+# Run the Gradio app
 gradio_interface()
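
A quick way to sanity-check the updated pipeline locally (a sketch, not part of the commit): run the same moviepy 1.x calls that process_and_generate_video now uses (ImageClip(...).set_duration(...).set_fps(24), concatenate_videoclips(..., method="compose"), set_audio, write_videofile) against placeholder files. The file names sample.mp3 and slide.jpg below are assumptions to replace with real paths, and the plain resize stands in for the app's resize_image_with_aspect_ratio letterboxing.

# Local smoke test (sketch): assumes moviepy 1.x and placeholder files sample.mp3 / slide.jpg
import moviepy.editor as mp
import numpy as np
from PIL import Image

audio = mp.AudioFileClip("sample.mp3")            # placeholder audio path
img = Image.open("slide.jpg").convert("RGB")      # placeholder image path
frame = np.array(img.resize((1280, 720)))         # plain resize; the app letterboxes instead

clip = mp.ImageClip(frame).set_duration(audio.duration).set_fps(24)
video = mp.concatenate_videoclips([clip], method="compose").set_audio(audio)
video.write_videofile("/tmp/smoke_test.mp4", codec="libx264", audio_codec="aac")

In the Space itself, the handler returns a (video_path, log_text) tuple on success and (None, log_text) on failure, which is what the two-element outputs=[output_video, debug_logs] wiring expects.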