kevinwang676 committed
Commit a4a10af · 1 Parent(s): 07e3e26

Update app_multi.py

Files changed (1)
  1. app_multi.py +7 -4
app_multi.py CHANGED
@@ -74,7 +74,8 @@ def combine_music(video, audio):
     audio_background = mpe.AudioFileClip(audio)
     final_audio = mpe.CompositeAudioClip([my_clip.audio, audio_background])
     final_clip = my_clip.set_audio(final_audio)
-    return final_clip
+    final_clip.write_videofile("video.mp4")
+    return "video.mp4"
 
 # Reference: https://huggingface.co/spaces/zomehwh/rvc-models/blob/main/app.py#L21 # noqa
 in_hf_space = getenv('SYSTEM') == 'spaces'
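
For context, here is a minimal sketch of how the updated helper reads after this hunk. The my_clip = mpe.VideoFileClip(video) line sits above the hunk and is assumed here; the change writes the mixed clip to disk and returns the file path (which a Gradio video output can serve) instead of returning the raw moviepy clip object:

import moviepy.editor as mpe

def combine_music(video, audio):
    # Assumed from context: open the generated talking-head video.
    my_clip = mpe.VideoFileClip(video)
    # Load the accompaniment and mix it with the clip's own (vocal) audio.
    audio_background = mpe.AudioFileClip(audio)
    final_audio = mpe.CompositeAudioClip([my_clip.audio, audio_background])
    final_clip = my_clip.set_audio(final_audio)
    # Render to disk and return the path rather than the clip object.
    final_clip.write_videofile("video.mp4")
    return "video.mp4"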
@@ -800,6 +801,8 @@ with app:
 
     submit = gr.Button('想把我唱给你听', elem_id="sadtalker_generate", variant='primary')
 
+    inp_mv_1 = gen_video
+    inp_mv_2 = as_audio_no_vocals
     gen_mv = gr.Button('为视频添加伴奏吧', variant='primary')
 
     with gr.Row():
@@ -816,8 +819,8 @@
     # height = gr.Slider(minimum=64, elem_id="img2img_height", maximum=2048, step=8, label="Manually Crop Height", value=512) # img2img_width
     pose_style = gr.Slider(minimum=0, maximum=46, step=1, label="Pose style", value=0, visible=False) #
     size_of_image = gr.Radio([256, 512], value=256, label='face model resolution', info="use 256/512 model?", visible=False) #
-    preprocess_type = gr.Radio(['crop', 'full'], value='crop', label='是否聚焦角色面部', info="crop:视频会聚焦角色面部;full:视频会显示图片全貌")
-    is_still_mode = gr.Checkbox(label="静态模式 (开启静态模式,角色的面部动作会减少;默认开启)", value=True)
+    preprocess_type = gr.Radio(['crop', 'extfull'], value='crop', label='是否聚焦角色面部', info="crop:视频会聚焦角色面部;extfull:视频会显示图片全貌")
+    is_still_mode = gr.Checkbox(label="静态模式 (开启静态模式,角色的面部动作会减少;默认开启)", value=True, visible=False)
     batch_size = gr.Slider(label="Batch size (数值越大,生成速度越快;若显卡性能好,可增大数值)", step=1, maximum=32, value=4)
     enhancer = gr.Checkbox(label="GFPGAN as Face enhancer", visible=False)
 
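
A note on this hunk: preprocess_type's label asks whether to focus on the character's face ('crop' focuses the video on the face; 'extfull' keeps the whole image), and is_still_mode is a "still mode" toggle (fewer facial movements, on by default). In Gradio, a component created with visible=False is hidden from the page but still passes its current value to any event it is wired into, so hiding is_still_mode keeps still mode enabled without exposing the control. A small self-contained sketch; the describe function and labels are illustrative, not from the app:

import gradio as gr

def describe(preprocess, still):
    # Values from hidden components arrive here just like visible ones.
    return f"preprocess={preprocess}, still_mode={still}"

with gr.Blocks() as demo:
    preprocess_type = gr.Radio(['crop', 'extfull'], value='crop', label='preprocess')
    # Hidden from the UI, but its default value still reaches the handler.
    is_still_mode = gr.Checkbox(label='still mode', value=True, visible=False)
    out = gr.Textbox(label='result')
    run = gr.Button('run')
    run.click(fn=describe, inputs=[preprocess_type, is_still_mode], outputs=[out])

if __name__ == '__main__':
    demo.launch()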
 
@@ -836,7 +839,7 @@
         outputs=[gen_video]
     )
 
-    gen_mv.click(fn=combine_music, inputs=[gen_video, as_audio_no_vocals], outputs=[music_video])
+    gen_mv.click(fn=combine_music, inputs=[inp_mv_1, inp_mv_2], outputs=[music_video])
 
 
     gr.Markdown("### <center>注意❗:请不要生成会对个人以及组织造成侵害的内容,此程序仅供科研、学习及个人娱乐使用。</center>")
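
For context, the rewired click event follows the usual Gradio Blocks pattern: Button.click(fn, inputs, outputs) passes the current values of the input components to the callable and routes its return value to the output component. The gen_mv label means "add accompaniment to the video". Below is a minimal self-contained sketch of the same shape, with a stub in place of the real combine_music and assumed component types; the actual definitions of gen_video, as_audio_no_vocals and music_video live elsewhere in app_multi.py:

import gradio as gr

def combine_music(video, audio):
    # Stand-in for the helper above: mix and return a rendered file path.
    return video

with gr.Blocks() as app:
    # Assumed component types; the real app defines these earlier.
    gen_video = gr.Video(label='generated video')
    as_audio_no_vocals = gr.Audio(label='accompaniment', type='filepath')
    music_video = gr.Video(label='video with accompaniment')

    # Plain Python name binding: the aliases point at the same components.
    inp_mv_1 = gen_video
    inp_mv_2 = as_audio_no_vocals

    gen_mv = gr.Button('为视频添加伴奏吧', variant='primary')  # "add accompaniment to the video"
    gen_mv.click(fn=combine_music, inputs=[inp_mv_1, inp_mv_2], outputs=[music_video])

if __name__ == '__main__':
    app.launch()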
 