seawolf2357 committed · verified
Commit 734692a · 1 parent: ae7e646

Update app.py

Files changed (1)
  1. app.py +14 -3
app.py CHANGED
@@ -3,6 +3,9 @@ import gradio as gr
 from diffusers import AnimateDiffPipeline, MotionAdapter, DPMSolverMultistepScheduler, AutoencoderKL, SparseControlNetModel, EulerAncestralDiscreteScheduler
 from diffusers.utils import export_to_gif, load_image
 from realesrgan import RealESRGAN
+from PIL import Image
+import cv2
+import numpy as np
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
@@ -14,6 +17,13 @@ def enhance_quality(image_path):
     sr_image.save('enhanced_' + image_path)
     return 'enhanced_' + image_path
 
+def denoise_image(image_path):
+    image = cv2.imread(image_path)
+    denoised_image = cv2.fastNlMeansDenoisingColored(image, None, 10, 10, 7, 21)
+    denoised_path = 'denoised_' + image_path
+    cv2.imwrite(denoised_path, denoised_image)
+    return denoised_path
+
 def generate_video(prompt, negative_prompt, num_inference_steps, conditioning_frame_indices, controlnet_conditioning_scale):
     motion_adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-3", torch_dtype=torch.float16).to(device)
     controlnet = SparseControlNetModel.from_pretrained("guoyww/animatediff-sparsectrl-scribble", torch_dtype=torch.float16).to(device)
@@ -51,7 +61,8 @@ def generate_video(prompt, negative_prompt, num_inference_steps, conditioning_frame_indices, controlnet_conditioning_scale):
 
     export_to_gif(video, "output.gif")
     enhanced_gif = enhance_quality("output.gif")
-    return enhanced_gif
+    denoised_gif = denoise_image(enhanced_gif)
+    return denoised_gif
 
 def generate_simple_video(prompt):
     adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16).to(device)
@@ -76,7 +87,8 @@ def generate_simple_video(prompt):
 
     export_to_gif(frames, "simple_output.gif")
     enhanced_gif = enhance_quality("simple_output.gif")
-    return enhanced_gif
+    denoised_gif = denoise_image(enhanced_gif)
+    return denoised_gif
 
 demo1 = gr.Interface(
     fn=generate_video,
@@ -102,6 +114,5 @@ demo2 = gr.Interface(
 
 demo = gr.TabbedInterface([demo1, demo2], ["Advanced Video Generation", "Simple Video Generation"])
 
-
 demo.launch()
 #demo.launch(server_name="0.0.0.0", server_port=7910)
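The second hunk shows only the tail of enhance_quality, so the upscaling step itself is off-screen. A plausible reading of the full helper, assuming the imported RealESRGAN class exposes load_weights and predict in the style of the ai-forever Real-ESRGAN package; the scale, weight path, and everything above the save call are assumptions, not part of this commit:

def enhance_quality(image_path):
    # Hypothetical reconstruction; only the last two lines appear in the diff.
    # Relies on the module-level `device` defined in app.py.
    model = RealESRGAN(device, scale=4)              # scale=4 is an assumption
    model.load_weights('weights/RealESRGAN_x4.pth')  # illustrative weight path
    image = Image.open(image_path).convert('RGB')
    sr_image = model.predict(image)                  # upscaled PIL image
    sr_image.save('enhanced_' + image_path)
    return 'enhanced_' + image_path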
 
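One caveat on the new helper: denoise_image is called on GIF paths, but OpenCV's imread does not decode GIF files and returns None for them, so cv2.fastNlMeansDenoisingColored would fail at runtime. A sketch of a per-frame variant built on PIL's ImageSequence (PIL is already imported by this commit); denoise_gif is a hypothetical name, not part of the commit:

import cv2
import numpy as np
from PIL import Image, ImageSequence

def denoise_gif(gif_path):
    # Decode each frame with PIL, denoise it with OpenCV, re-assemble the GIF.
    frames = []
    with Image.open(gif_path) as gif:
        for frame in ImageSequence.Iterator(gif):
            rgb = np.array(frame.convert("RGB"))
            bgr = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
            # Same parameters as the commit's call: h=10, hColor=10,
            # templateWindowSize=7, searchWindowSize=21.
            denoised = cv2.fastNlMeansDenoisingColored(bgr, None, 10, 10, 7, 21)
            frames.append(Image.fromarray(cv2.cvtColor(denoised, cv2.COLOR_BGR2RGB)))
    denoised_path = "denoised_" + gif_path
    frames[0].save(denoised_path, save_all=True, append_images=frames[1:], loop=0)
    return denoised_path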
 
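With both tabs now chained through enhance_quality and denoise_image, the output filenames compose predictably, which makes for an easy sanity check outside the Gradio UI. A minimal smoke test, assuming app.py runs as-is; the prompt is illustrative, and the first run downloads the model weights:

# Expected files in the working directory after this call:
# simple_output.gif -> enhanced_simple_output.gif -> denoised_enhanced_simple_output.gif
gif_path = generate_simple_video("a rocket launching into the night sky")
print(gif_path)  # 'denoised_enhanced_simple_output.gif'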