codermert committed on
Commit
32811b0
·
verified ·
1 Parent(s): b7c757c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -8
app.py CHANGED
@@ -4,7 +4,6 @@ import json
4
  import logging
5
  import torch
6
  from PIL import Image
7
- import spaces
8
  from diffusers import DiffusionPipeline, AutoencoderTiny, AutoencoderKL, AutoPipelineForImage2Image
9
  from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
10
  from diffusers.utils import load_image
@@ -79,7 +78,6 @@ def update_selection(evt: gr.SelectData, width, height):
79
  height,
80
  )
81
 
82
- @spaces.GPU(duration=70)
83
  def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, progress):
84
  pipe.to("cuda")
85
  generator = torch.Generator(device="cuda").manual_seed(seed)
@@ -116,7 +114,6 @@ def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps
116
  ).images[0]
117
  return final_image
118
 
119
- @spaces.GPU(duration=70)
120
  def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
121
  if selected_index is None:
122
  raise gr.Error("You must select a LoRA before proceeding.")
@@ -155,7 +152,6 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
155
  seed = random.randint(0, MAX_SEED)
156
 
157
  if(image_input is not None):
158
-
159
  final_image = generate_image_to_image(prompt_mash, image_input, image_strength, steps, cfg_scale, width, height, lora_scale, seed)
160
  yield final_image, seed, gr.update(visible=False)
161
  else:
@@ -240,7 +236,6 @@ def get_huggingface_safetensors(link):
240
 
241
  return split_link[1], link, safetensors_name, trigger_word, image_url
242
 
243
-
244
  def check_custom_model(link):
245
  if(link.startswith("https://")):
246
  if(link.startswith("https://huggingface.co") or link.startswith("https://www.huggingface.co")):
@@ -290,8 +285,6 @@ def add_custom_lora(custom_lora):
290
  def remove_custom_lora():
291
  return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None, ""
292
 
293
- run_lora.zerogpu = True
294
-
295
  css = '''
296
  #gen_btn{height: 100%}
297
  #gen_column{align-self: stretch}
@@ -308,8 +301,9 @@ css = '''
308
  .progress-container {width: 100%;height: 30px;background-color: #f0f0f0;border-radius: 15px;overflow: hidden;margin-bottom: 20px}
309
  .progress-bar {height: 100%;background-color: #4f46e5;width: calc(var(--current) / var(--total) * 100%);transition: width 0.5s ease-in-out}
310
  '''
 
311
  font=[gr.themes.GoogleFont("Source Sans Pro"), "Arial", "sans-serif"]
312
- with gr.Blocks(theme=gr.themes.Soft(font=font), css=css, delete_cache=(60, 60)) as app:
313
  title = gr.HTML(
314
  """<h1><img src="https://huggingface.co/spaces/multimodalart/flux-lora-the-explorer/resolve/main/flux_lora.png" alt="LoRA"> FLUX LoRA the Explorer</h1>""",
315
  elem_id="title",
 
4
  import logging
5
  import torch
6
  from PIL import Image
 
7
  from diffusers import DiffusionPipeline, AutoencoderTiny, AutoencoderKL, AutoPipelineForImage2Image
8
  from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
9
  from diffusers.utils import load_image
 
78
  height,
79
  )
80
 
 
81
  def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, progress):
82
  pipe.to("cuda")
83
  generator = torch.Generator(device="cuda").manual_seed(seed)
 
114
  ).images[0]
115
  return final_image
116
 
 
117
  def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
118
  if selected_index is None:
119
  raise gr.Error("You must select a LoRA before proceeding.")
 
152
  seed = random.randint(0, MAX_SEED)
153
 
154
  if(image_input is not None):
 
155
  final_image = generate_image_to_image(prompt_mash, image_input, image_strength, steps, cfg_scale, width, height, lora_scale, seed)
156
  yield final_image, seed, gr.update(visible=False)
157
  else:
 
236
 
237
  return split_link[1], link, safetensors_name, trigger_word, image_url
238
 
 
239
  def check_custom_model(link):
240
  if(link.startswith("https://")):
241
  if(link.startswith("https://huggingface.co") or link.startswith("https://www.huggingface.co")):
 
285
  def remove_custom_lora():
286
  return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None, ""
287
 
 
 
288
  css = '''
289
  #gen_btn{height: 100%}
290
  #gen_column{align-self: stretch}
 
301
  .progress-container {width: 100%;height: 30px;background-color: #f0f0f0;border-radius: 15px;overflow: hidden;margin-bottom: 20px}
302
  .progress-bar {height: 100%;background-color: #4f46e5;width: calc(var(--current) / var(--total) * 100%);transition: width 0.5s ease-in-out}
303
  '''
304
+
305
  font=[gr.themes.GoogleFont("Source Sans Pro"), "Arial", "sans-serif"]
306
+ with gr.Blocks(theme=gr.themes.Soft(font=font), css=css) as app:
307
  title = gr.HTML(
308
  """<h1><img src="https://huggingface.co/spaces/multimodalart/flux-lora-the-explorer/resolve/main/flux_lora.png" alt="LoRA"> FLUX LoRA the Explorer</h1>""",
309
  elem_id="title",