rahul7star committed (verified)
Commit 502938a · Parent(s): 8f29a6d

Update simple_app.py
Files changed (1): simple_app.py (+35 -104)
simple_app.py CHANGED
@@ -1,44 +1,22 @@
 import gradio as gr
 import re
 import subprocess
-import time
 import select
-from tqdm import tqdm
 from huggingface_hub import snapshot_download
 
-#Download model
+# Download model (for demonstration, adjust based on actual model needs)
 snapshot_download(
-    repo_id = "Wan-AI/Wan2.1-T2V-1.3B",
-    local_dir = "./Wan2.1-T2V-1.3B"
+    repo_id="Wan-AI/Wan2.1-T2V-1.3B",
+    local_dir="./Wan2.1-T2V-1.3B"
 )
 
+# Function to generate video
 def infer(prompt, progress=gr.Progress(track_tqdm=True)):
-
-    # Configuration:
-    total_process_steps = 11  # Total INFO messages expected
-    irrelevant_steps = 4      # First 4 INFO messages are ignored
-    relevant_steps = total_process_steps - irrelevant_steps  # 7 overall steps
-
-    # Create overall progress bar (Level 1)
-    overall_bar = tqdm(total=relevant_steps, desc="Overall Process", position=1,
-                       ncols=120, dynamic_ncols=False, leave=True)
-    processed_steps = 0
-
-    # Regex for video generation progress (Level 3)
-    progress_pattern = re.compile(r"(\d+)%\|.*\| (\d+)/(\d+)")
-    video_progress_bar = None
-
-    # Variables for sub-step progress bar (Level 2)
-    # Now using 1000 ticks to represent 40 seconds (each tick = 40 ms)
-    sub_bar = None
-    sub_ticks = 0
-    sub_tick_total = 1500
-    video_phase = False
-
+    # Reduced progress output and simplified structure
     command = [
-        "python", "-u", "-m", "generate",  # using -u for unbuffered output
+        "python", "-u", "-m", "generate",  # Using unbuffered output
         "--task", "t2v-1.3B",
-        "--size", "832*480",
+        "--size", "832*480",  # You can try reducing resolution further for CPU
         "--ckpt_dir", "./Wan2.1-T2V-1.3B",
         "--sample_shift", "8",
         "--sample_guide_scale", "6",
@@ -46,14 +24,19 @@ def infer(prompt, progress=gr.Progress(track_tqdm=True)):
         "--save_file", "generated_video.mp4"
     ]
 
-    process = subprocess.Popen(command,
-                               stdout=subprocess.PIPE,
-                               stderr=subprocess.STDOUT,
-                               text=True,
+    # Run the model inference in a subprocess
+    process = subprocess.Popen(command,
+                               stdout=subprocess.PIPE,
+                               stderr=subprocess.PIPE,
+                               text=True,
                                bufsize=1)
 
+    # Monitor progress with a minimal progress bar
+    progress_pattern = re.compile(r"(\d+)%\|.*\| (\d+)/(\d+)")
+    video_progress_bar = None
+    overall_steps = 0
+
     while True:
-        # Poll stdout with a 40ms timeout.
         rlist, _, _ = select.select([process.stdout], [], [], 0.04)
         if rlist:
            line = process.stdout.readline()
@@ -63,102 +46,50 @@
            if not stripped_line:
                continue
 
-            # Check for video generation progress (Level 3)
+            # Check for video generation progress
            progress_match = progress_pattern.search(stripped_line)
            if progress_match:
-                # If a sub-step bar is active, finish it before entering video phase.
-                if sub_bar is not None:
-                    if sub_ticks < sub_tick_total:
-                        sub_bar.update(sub_tick_total - sub_ticks)
-                    sub_bar.close()
-                    overall_bar.update(1)
-                    overall_bar.refresh()
-                    sub_bar = None
-                    sub_ticks = 0
-                video_phase = True
                current = int(progress_match.group(2))
                total = int(progress_match.group(3))
                if video_progress_bar is None:
-                    video_progress_bar = tqdm(total=total, desc="Video Generation", position=0,
-                                              ncols=120, dynamic_ncols=True, leave=True)
-                video_progress_bar.update(current - video_progress_bar.n)
-                video_progress_bar.refresh()
-                if video_progress_bar.n >= video_progress_bar.total:
-                    video_phase = False
-                    overall_bar.update(1)
-                    overall_bar.refresh()
-                    video_progress_bar.close()
-                    video_progress_bar = None
+                    video_progress_bar = gr.Progress()
+                video_progress_bar.update(current / total)
+                video_progress_bar.update(current / total)
                continue
 
-            # Process INFO messages (Level 2 sub-step)
+            # Process info messages (simplified)
            if "INFO:" in stripped_line:
-                parts = stripped_line.split("INFO:", 1)
-                msg = parts[1].strip() if len(parts) > 1 else ""
-                print(stripped_line)  # Log the message
-
-                # For the first 4 INFO messages, simply count them.
-                if processed_steps < irrelevant_steps:
-                    processed_steps += 1
-                    continue
-                else:
-                    # A new relevant INFO message has arrived.
-                    # If a sub-bar exists (whether full or not), finish it now.
-                    if sub_bar is not None:
-                        if sub_ticks < sub_tick_total:
-                            sub_bar.update(sub_tick_total - sub_ticks)
-                        sub_bar.close()
-                        overall_bar.update(1)
-                        overall_bar.refresh()
-                        sub_bar = None
-                        sub_ticks = 0
-                    # Start a new sub-step bar for the current INFO message.
-                    sub_bar = tqdm(total=sub_tick_total, desc=msg, position=2,
-                                   ncols=120, dynamic_ncols=False, leave=True)
-                    sub_ticks = 0
+                overall_steps += 1
                continue
            else:
                print(stripped_line)
-        else:
-            # No new data within 40ms.
-            if sub_bar is not None:
-                if sub_ticks < sub_tick_total:
-                    sub_bar.update(1)
-                    sub_ticks += 1
-                    sub_bar.refresh()
-                # If full (40 seconds reached), do not advance overall step—just remain waiting.
+
        if process.poll() is not None:
            break
 
-    # Drain any remaining output.
-    for line in process.stdout:
-        print(line.strip())
+    # Clean up and finalize the progress bar
    process.wait()
-    if video_progress_bar is not None:
+    if video_progress_bar:
        video_progress_bar.close()
-    if sub_bar is not None:
-        sub_bar.close()
-    overall_bar.close()
-
+
+    # Return the video file path if successful
    if process.returncode == 0:
-        print("Command executed successfully.")
        return "generated_video.mp4"
    else:
-        print("Error executing command.")
        raise Exception("Error executing command")
 
+# Gradio UI
 with gr.Blocks() as demo:
    with gr.Column():
-        gr.Markdown("# Wan 2.1 1.3B")
-        gr.Markdown("Enjoy this simple working UI, duplicate the space to skip the queue :)")
+        gr.Markdown("# Wan 2.1 1.3B Video Generation")
        prompt = gr.Textbox(label="Prompt")
-        submit_btn = gr.Button("Submit")
+        submit_btn = gr.Button("Generate Video")
        video_res = gr.Video(label="Generated Video")
 
        submit_btn.click(
-            fn = infer,
-            inputs = [prompt],
-            outputs = [video_res]
+            fn=infer,
+            inputs=[prompt],
+            outputs=[video_res]
        )
 
-demo.queue().launch(show_error=True, show_api=False, ssr_mode=False)
+demo.queue().launch(show_error=True, show_api=False)
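As a point of reference for the progress handling above, here is a minimal, hypothetical sketch (not part of this commit) of forwarding the subprocess's tqdm-style percentages to Gradio's progress API: a gr.Progress object is called directly with a fraction rather than updated through methods. The helper name stream_generation, the merged-stream read loop, and the type hints are assumptions for illustration.

import re
import subprocess
import gradio as gr

# Matches tqdm-style lines such as "  12%|####      | 6/50" (the same pattern simple_app.py parses).
PROGRESS_RE = re.compile(r"(\d+)%\|.*\| (\d+)/(\d+)")

def stream_generation(command: list[str], progress: gr.Progress) -> int:
    """Run `command`, echo its log lines, and forward progress to Gradio.

    `progress` is the gr.Progress object Gradio injects into an event handler
    (here, the one `infer` already declares); it is used as a callable,
    e.g. progress(0.42, desc="Video Generation").
    """
    process = subprocess.Popen(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,   # merge stderr so a single pipe can be read
        text=True,
        bufsize=1,                  # line-buffered text mode
    )
    for line in process.stdout:     # blocking, line-by-line read of the merged stream
        match = PROGRESS_RE.search(line)
        if match:
            current, total = int(match.group(2)), int(match.group(3))
            progress(current / total, desc="Video Generation")
        else:
            print(line.rstrip())
    process.wait()
    return process.returncode

Inside infer, this helper would be called with the same command list the app builds and the progress argument the handler already receives; merging stderr into stdout is what lets the plain for-loop over process.stdout stand in for the select() polling, at the cost of not distinguishing the two streams.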