OMilosh committed on
Commit
e390658
·
verified ·
1 Parent(s): f8d575d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -5
app.py CHANGED
@@ -1,6 +1,7 @@
1
  import gradio as gr
2
  import numpy as np
3
  import random
 
4
 
5
  # import spaces #[uncomment to use ZeroGPU]
6
  from diffusers import DiffusionPipeline
@@ -25,11 +26,6 @@ def init_model(model_repo_id):
25
  torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
26
  pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
27
  return pipe
28
-
29
- loaded_models = {}
30
-
31
- for model in available_models:
32
- loaded_models[model] = init_model(model)
33
 
34
  # @spaces.GPU #[uncomment to use ZeroGPU]
35
  def infer(
@@ -153,6 +149,13 @@ with gr.Blocks(css=css) as demo:
153
  )
154
 
155
  gr.Examples(examples=examples, inputs=[prompt])
 
 
 
 
 
 
 
156
  gr.on(
157
  triggers=[run_button.click, prompt.submit],
158
  fn=infer,
 
1
  import gradio as gr
2
  import numpy as np
3
  import random
4
+ from tqdm import tqdm
5
 
6
  # import spaces #[uncomment to use ZeroGPU]
7
  from diffusers import DiffusionPipeline
 
26
  torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
27
  pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
28
  return pipe
 
 
 
 
 
29
 
30
  # @spaces.GPU #[uncomment to use ZeroGPU]
31
  def infer(
 
149
  )
150
 
151
  gr.Examples(examples=examples, inputs=[prompt])
152
+
153
+
154
+ loaded_models = {}
155
+
156
+ for model in tqdm(available_models):
157
+ loaded_models[model] = init_model(model)
158
+
159
  gr.on(
160
  triggers=[run_button.click, prompt.submit],
161
  fn=infer,