Update app.py

app.py CHANGED
@@ -9,7 +9,7 @@ from huggingface_hub import hf_hub_download
 import torch
 from diffusers import DiffusionPipeline
 from huggingface_hub import hf_hub_download
-
+from gradio_imageslider import ImageSlider
 
 # Constants
 MAX_SEED = np.iinfo(np.int32).max
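Note: gradio_imageslider provides the before/after comparison widget typically used to show the source image against the edit. A minimal usage sketch (the label and component name `comparison` are illustrative, not taken from this file):

import gradio as gr
from gradio_imageslider import ImageSlider

with gr.Blocks() as demo:
    # ImageSlider renders a (before, after) image pair under a draggable divider.
    comparison = ImageSlider(label="Input / Edit", type="pil")

demo.launch()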
@@ -23,11 +23,11 @@ SINGLE_MODAL_VITAL_LAYERS = list(np.array([28, 53, 54, 56, 25]) - 19)
 pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev",
 
                                          torch_dtype=torch.bfloat16)
-
-
+pipe.load_lora_weights(hf_hub_download("ByteDance/Hyper-SD", "Hyper-FLUX.1-dev-8steps-lora.safetensors"))
+pipe.fuse_lora(lora_scale=0.125)
 
 #pipe.enable_lora()
-pipe.to(DEVICE)
+pipe.to(DEVICE, dtype=torch.float16)
 
 def get_examples():
     case = [
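Note: Hyper-FLUX.1-dev-8steps-lora is ByteDance's step-distillation adapter, and lora_scale=0.125 (1/8) is the fusing scale its model card suggests for the 8-step variant. Fusing bakes the adapter into the base weights, so no LoRA bookkeeping runs at inference time. The same loading pattern in isolation:

import torch
from diffusers import DiffusionPipeline
from huggingface_hub import hf_hub_download

pipe = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
)
# Download the 8-step distillation LoRA and fuse it into the base weights.
lora = hf_hub_download("ByteDance/Hyper-SD", "Hyper-FLUX.1-dev-8steps-lora.safetensors")
pipe.load_lora_weights(lora)
pipe.fuse_lora(lora_scale=0.125)
# The commit also casts to float16 when moving to the GPU.
pipe.to("cuda", dtype=torch.float16)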
@@ -103,6 +103,7 @@ def invert_and_edit(image,
                     num_inference_steps,
                     seed,
                     randomize_seed,
+                    latent_nudging_scalar,
                     width = 1024,
                     height = 1024,
                     inverted_latent_list = None,
@@ -112,7 +113,7 @@ def invert_and_edit(image,
 ):
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
-    if image_input:
+    if image_input and (image is not None):
         if do_inversion:
             inverted_latent_list = pipe(
                 source_prompt,
@@ -122,7 +123,7 @@ def invert_and_edit(image,
                 output_type="pil",
                 num_inference_steps=num_inversion_steps,
                 max_sequence_length=512,
-                latents=image2latent(image),
+                latents=image2latent(image, latent_nudging_scalar),
                 invert_image=True
             )
             do_inversion = False
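Note: image2latent is defined elsewhere in app.py and is not shown in this diff. A plausible sketch of what the new latent_nudging_scalar argument does, assuming the helper VAE-encodes the input and scales the result (the body below is an assumption, and FLUX's latent packing is omitted):

def image2latent(image, latent_nudging_scalar=1.15):
    # Assumed helper: encode the PIL image with the pipeline's VAE...
    image = pipe.image_processor.preprocess(image).to(DEVICE, dtype=pipe.vae.dtype)
    latents = pipe.vae.encode(image).latent_dist.sample()
    latents = (latents - pipe.vae.config.shift_factor) * pipe.vae.config.scaling_factor
    # ...then multiply by a scalar slightly above 1 ("nudging"), which
    # keeps the inversion trajectory close enough to the data manifold to edit.
    return latents * latent_nudging_scalar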
@@ -130,7 +131,7 @@ def invert_and_edit(image,
         else:
             # move to gpu because of zero and gr.states
             inverted_latent_list = [tensor.to(DEVICE) for tensor in inverted_latent_list]
-
+        num_inference_steps = num_inversion_steps
         latents = inverted_latent_list[-1].tile(2, 1, 1)
         guidance_scale = [1,3]
         image_input = True
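Note: the last inverted latent is duplicated along the batch dimension so that the source prompt and the edit prompt start denoising from the identical state, with per-prompt guidance [1, 3]. What tile(2, 1, 1) does here:

import torch

inverted = torch.randn(1, 4096, 64)          # final inverted latent (shape illustrative)
latents = inverted.tile(2, 1, 1)             # -> (2, 4096, 64): one copy per prompt
assert torch.equal(latents[0], latents[1])   # both prompts share the starting point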
@@ -168,7 +169,10 @@ def invert_and_edit(image,
     # move back to cpu because of zero and gr.states
     if inverted_latent_list is not None:
         inverted_latent_list = [tensor.cpu() for tensor in inverted_latent_list]
-
+    if image is None:
+        image = output[0]
+
+    return image, output[1], inverted_latent_list, do_inversion, image_input, seed
 
 # UI CSS
 css = """
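Note: on ZeroGPU Spaces the GPU is attached only for the duration of a decorated call, so tensors stashed in gr.State between calls must be moved to the CPU on the way out and back to the device on the way in, which is what the two list comprehensions do. The pattern in isolation (helper names are illustrative):

import torch

def stash_for_state(tensors):
    # Runs before returning: gr.State must not hold CUDA tensors on ZeroGPU.
    return [t.cpu() for t in tensors]

def restore_from_state(tensors, device="cuda"):
    # Runs at the start of the next GPU-decorated call.
    return [t.to(device) for t in tensors]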
@@ -252,7 +256,7 @@ following the algorithm proposed in [*Stable Flow: Vital Layers for Training-Fre
                     minimum=1,
                     maximum=50,
                     step=1,
-                    value=
+                    value=8,
                 )
 
 
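Note: the new default of 8 inference steps lines up with the 8-step Hyper-FLUX LoRA fused above.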
@@ -265,6 +269,13 @@ following the algorithm proposed in [*Stable Flow: Vital Layers for Training-Fre
                     step=1,
                     value=25,
                 )
+                latent_nudging_scalar = gr.Slider(
+                    label="latent nudging scalar",
+                    minimum=1,
+                    maximum=5,
+                    step=0.01,
+                    value=1.15,
+                )
 
             with gr.Row():
                 width = gr.Slider(
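Note: with the default of 1.15 the encoded latents are presumably scaled up by about 15% (see the image2latent sketch above); a value of 1 would leave them untouched.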
@@ -297,6 +308,7 @@ following the algorithm proposed in [*Stable Flow: Vital Layers for Training-Fre
                 num_inference_steps,
                 seed,
                 randomize_seed,
+                latent_nudging_scalar,
                 width,
                 height,
                 inverted_latents,
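Note: the slider only takes effect because it is inserted into the handler's inputs list at the same position as the new latent_nudging_scalar parameter; Gradio matches inputs to function arguments by order. A condensed, runnable sketch of the wiring (the button, stub handler, and component ranges are assumptions; only the input order around latent_nudging_scalar follows the diff):

import gradio as gr

def handler(num_inference_steps, seed, randomize_seed,
            latent_nudging_scalar, width, height, inverted_latents):
    # Stub standing in for invert_and_edit: argument order must match `inputs`.
    return f"steps={num_inference_steps}, nudging={latent_nudging_scalar}"

with gr.Blocks() as demo:
    num_inference_steps = gr.Slider(1, 50, value=8, step=1, label="num inference steps")
    seed = gr.Number(value=42, label="seed")
    randomize_seed = gr.Checkbox(value=True, label="randomize seed")
    latent_nudging_scalar = gr.Slider(1, 5, value=1.15, step=0.01,
                                      label="latent nudging scalar")
    width = gr.Slider(256, 2048, value=1024, step=16, label="width")
    height = gr.Slider(256, 2048, value=1024, step=16, label="height")
    inverted_latents = gr.State(None)
    result = gr.Textbox(label="result")
    run = gr.Button("Edit")
    run.click(handler,
              inputs=[num_inference_steps, seed, randomize_seed,
                      latent_nudging_scalar, width, height, inverted_latents],
              outputs=[result])

demo.launch()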