1inkusFace committed on
Commit c2aa7b8 · verified · 1 Parent(s): 51ca48d

Update app.py

Files changed (1):
  app.py  +18 -14
app.py CHANGED
@@ -12,11 +12,23 @@ from diffusers.utils import export_to_video
 from diffusers.utils import load_image
 from PIL import Image
 
-#predictor = None
-#task_type = None
+import torch
+
+torch.backends.cuda.matmul.allow_tf32 = False
+torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
+torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
+torch.backends.cudnn.allow_tf32 = False
+torch.backends.cudnn.deterministic = False
+torch.backends.cudnn.benchmark = False
+torch.backends.cuda.preferred_blas_library="cublas"
+torch.backends.cuda.preferred_linalg_library="cusolver"
+torch.set_float32_matmul_precision("highest")
+
+os.putenv("HF_HUB_ENABLE_HF_TRANSFER","1")
+os.environ["SAFETENSORS_FAST_GPU"] = "1"
+
 os.putenv("TOKENIZERS_PARALLELISM","False")
 
-#@spaces.GPU(duration=120)
 def init_predictor():
     global predictor
     predictor = SkyReelsVideoSingleGpuInfer(
@@ -31,8 +43,8 @@ def init_predictor():
         )
     )
 
-@spaces.GPU(duration=80)
-def generate_video(prompt, seed, image, size, steps, frames):
+@spaces.GPU(duration=90)
+def generate_video(prompt, seed, image, size, steps, frames, progress=gr.Progress(track_tqdm=True)):
     print(f"image:{type(image)}")
     if seed == -1:
         random.seed(time.time())
@@ -53,7 +65,6 @@ def generate_video(prompt, seed, image, size, steps, frames):
         img = load_image(image=image)
         img.resize((320,320), Image.LANCZOS)
         kwargs["image"] = img
-    #global predictor
     output = predictor.inference(kwargs)
     save_dir = f"./"
     video_out_file = f"{save_dir}/{seed}.mp4"
@@ -61,7 +72,6 @@ def generate_video(prompt, seed, image, size, steps, frames):
     export_to_video(output, video_out_file, fps=24)
     return video_out_file
 
-#def create_gradio_interface():
 with gr.Blocks() as demo:
     with gr.Row():
         image = gr.Image(label="Upload Image", type="filepath")
@@ -95,13 +105,7 @@ with gr.Blocks() as demo:
         inputs=[prompt, seed, image, size, steps, frames],
         outputs=[output_video],
     )
-    # return demo
 
-#init_predictor()
-
 if __name__ == "__main__":
-    #import multiprocessing
-    #multiprocessing.freeze_support()
     init_predictor()
-    #demo = create_gradio_interface()
-    demo.launch()
+    demo.launch()
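The new torch.backends block at the top of app.py disables TF32 and reduced-precision matmul paths, turns off cuDNN autotuning, and pins the BLAS/solver backends, trading some speed for full-FP32 numerics. A minimal sketch (not part of the commit) that could be pasted below those flags to confirm the effective settings before inference:

import torch

# Illustrative check only; run after the module-level flags in app.py have been applied.
print("float32 matmul precision:", torch.get_float32_matmul_precision())   # "highest" per this commit
print("cuda matmul TF32 allowed:", torch.backends.cuda.matmul.allow_tf32)  # False per this commit
print("cudnn TF32 allowed:", torch.backends.cudnn.allow_tf32)              # False per this commit
print("cudnn benchmark:", torch.backends.cudnn.benchmark)                  # False per this commit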
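The handler change raises the @spaces.GPU allocation from 80 to 90 seconds and adds progress=gr.Progress(track_tqdm=True), which mirrors any tqdm loop running inside the call (including the diffusers denoising loop) into the Gradio progress bar. A self-contained sketch of the same pattern, with a hypothetical slow_task standing in for generate_video and the Spaces-only @spaces.GPU decorator omitted:

import time
import gradio as gr
from tqdm import tqdm

# Hypothetical stand-in for generate_video: any tqdm loop inside the handler
# is reported to the Gradio UI because of track_tqdm=True.
def slow_task(steps, progress=gr.Progress(track_tqdm=True)):
    for _ in tqdm(range(int(steps)), desc="denoising"):
        time.sleep(0.05)
    return f"finished {int(steps)} steps"

demo = gr.Interface(slow_task, gr.Number(value=20), gr.Textbox())

if __name__ == "__main__":
    demo.launch()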