Runtime error
Update app.py
app.py CHANGED
@@ -177,6 +177,13 @@ with gr.Blocks(css=css) as demo:
 
         with gr.Row():
 
+            strength2 = gr.Slider(
+                label="Strength",
+                minimum=.1,
+                maximum=1,
+                step=0.1,
+                value=.5,
+            )
             guidance_scale2 = gr.Slider(
                 label="Guidance Scale",
                 minimum=1,
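The new Strength slider feeds the same strength argument that the image-to-image call below already uses (the pipeline2Image(...) call passes strength explicitly). For orientation only, here is a minimal, hypothetical sketch of how such a slider value is typically consumed by a diffusers image-to-image pipeline; pipe and the model id are placeholders, not this app's actual setup.

import torch
from diffusers import AutoPipelineForImage2Image

# Hypothetical stand-in for the app's pipeline2Image; the model id is a placeholder.
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = AutoPipelineForImage2Image.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
).to(device)

def img2img(prompt, init_image, strength=0.5, guidance_scale=7.5):
    # strength in (0, 1]: larger values add more noise to init_image,
    # so the result departs further from the input picture.
    return pipe(
        prompt=prompt,
        image=init_image,
        strength=strength,
        guidance_scale=guidance_scale,
    ).images[0]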
@@ -209,23 +216,40 @@ with gr.Blocks(css=css) as demo:
             return f"Failed to load image from URL: {e}"
         return None
 
-    @spaces.GPU
     def image2image(uploaded_image, image_url, use_generated=False):
-        image = select_image(uploaded_image, image_url, use_generated=
-        prompt = "one awesome dude"
-        generator = torch.Generator(device=device).manual_seed(1024)
-        image = pipeline2Image(prompt=prompt, image=image, strength=0.75, guidance_scale=7.5, generator=generator).images[0]
+        image = select_image(uploaded_image, image_url, use_generated=use_generated)
+        #prompt = "one awesome dude"
+        #generator = torch.Generator(device=device).manual_seed(1024)
+        #image = pipeline2Image(prompt=prompt, image=image, strength=0.75, guidance_scale=7.5, generator=generator).images[0]
         return image
 
     use_generated_image.click(fn=lambda: image2image(None, None, True), inputs=[], outputs=additional_image_output)
     uploaded_image.change(fn=image2image, inputs=[uploaded_image, image_url, gr.State(False)], outputs=additional_image_output)
     image_url.submit(fn=image2image, inputs=[uploaded_image, image_url, gr.State(False)], outputs=additional_image_output)
 
+    @spaces.GPU
+    def infer2(prompt, seed=42, image, randomize_seed=False, width=1024, height=1024, strength=.5, guidance_scale=5.0, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
+        if randomize_seed:
+            seed = random.randint(0, MAX_SEED)
+        generator = torch.Generator(device=device).manual_seed(seed)
+        image = pipeline2Image(prompt=prompt, image=image, strength=strength, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, generator=generator).images[0]
+        # generator = torch.Generator().manual_seed(seed)
+        # image = pipe(
+        #     prompt=prompt,
+        #     width=width,
+        #     height=height,
+        #     num_inference_steps=num_inference_steps,
+        #     generator=generator,
+        #     guidance_scale=guidance_scale
+        # ).images[0]
+        return image, seed
+    final_image_output = gr.Image(label="Final Image", show_label=False)
+
     gr.on(
         triggers=[run2_button.click, prompt2.submit],
-        fn=
-        inputs=[
-        outputs
+        fn=infer2,
+        inputs=[prompt2, seed2, additional_image_output, randomize_seed2, width2, height2, guidance_scale2, num_inference_steps2],
+        outputs[final_image_output, seed2]
     )
 
 demo.launch()
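As committed, the added infer2 signature places the non-default parameter image after the defaulted seed=42, which Python rejects with a SyntaxError before the app can start; that is consistent with the Runtime error status shown for this Space. A minimal sketch of a syntactically valid signature (an assumption, not the committed fix) moves image ahead of the defaulted parameters:

@spaces.GPU
def infer2(prompt, image, seed=42, randomize_seed=False, width=1024, height=1024,
           strength=.5, guidance_scale=5.0, num_inference_steps=28,
           progress=gr.Progress(track_tqdm=True)):
    # Body unchanged from the diff above; only the parameter order differs,
    # so the gr.on inputs list must follow the same order (see sketch below).
    ...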
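Two smaller wiring issues are visible in the gr.on(...) block: outputs[final_image_output, seed2] drops the = (it indexes a name called outputs instead of passing the keyword argument), and strength2 from the first hunk never appears in inputs, so the new slider would not reach infer2. A hedged sketch of the wiring under the reordered signature above; every component name here (prompt2, seed2, strength2, and so on) is taken from this diff and assumed to be defined elsewhere in app.py:

gr.on(
    triggers=[run2_button.click, prompt2.submit],
    fn=infer2,
    # Order matches infer2(prompt, image, seed, randomize_seed, width, height,
    # strength, guidance_scale, num_inference_steps) from the sketch above.
    inputs=[prompt2, additional_image_output, seed2, randomize_seed2,
            width2, height2, strength2, guidance_scale2, num_inference_steps2],
    outputs=[final_image_output, seed2],
)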