Spaces:
Running
on
Zero
Running
on
Zero
Fix step error handling and wire up error_box
Browse files
- app.py: +96 −112
- app_new.py: +119 −0
app.py
CHANGED
@@ -1,119 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
import gradio as gr
|
2 |
-
from inference import generate_with_lora
|
3 |
-
from background_edit import run_background_removal_and_inpaint
|
4 |
-
import traceback, torch, gc
|
5 |
-
|
6 |
-
# βββββββββββββββββββββ Helpers βββββββββββββββββββββ
|
7 |
-
def _print_trace():
|
8 |
-
traceback.print_exc()
|
9 |
-
|
10 |
-
def safe_generate_with_lora(*a, **kw):
|
11 |
-
try:
|
12 |
-
return generate_with_lora(*a, **kw)
|
13 |
-
except gr.Error:
|
14 |
-
_print_trace()
|
15 |
-
raise
|
16 |
-
except Exception as e:
|
17 |
-
_print_trace()
|
18 |
-
raise gr.Error(f"Image generation failed: {e}")
|
19 |
-
|
20 |
-
def unload_models():
|
21 |
-
torch.cuda.empty_cache()
|
22 |
-
gc.collect()
|
23 |
-
|
24 |
-
def safe_run_background(*args, **kwargs):
|
25 |
-
try:
|
26 |
-
unload_models() # free VRAM before loading the inpainting model
|
27 |
-
return run_background_removal_and_inpaint(*args, **kwargs)
|
28 |
-
except Exception as e:
|
29 |
-
_print_trace()
|
30 |
-
raise gr.Error(f"[Step 2] Background replacement failed: {type(e).__name__}: {e}")
|
31 |
-
|
32 |
-
# βββββββββββββββββββββ UI βββββββββββββββββββββ
|
33 |
-
shared_output = gr.State() # holds the Step 1 output image
|
34 |
-
original_input = gr.State() # holds the original upload (optional)
|
35 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
36 |
with gr.Blocks() as demo:
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
with gr.
|
41 |
-
|
42 |
-
|
43 |
-
output_image = gr.Image(type="pil", label="Refined Output")
|
44 |
-
|
45 |
-
with gr.Row():
|
46 |
-
prompt = gr.Textbox(
|
47 |
-
label="Prompt",
|
48 |
-
value="a professional corporate headshot of a confident woman in her 30s with blonde hair"
|
49 |
-
)
|
50 |
-
negative_prompt = gr.Textbox(
|
51 |
-
label="Negative Prompt",
|
52 |
-
value="deformed, cartoon, anime, illustration, painting, drawing, sketch, low resolution, blurry, out of focus, pixelated"
|
53 |
-
)
|
54 |
-
|
55 |
-
with gr.Row():
|
56 |
-
strength = gr.Slider(0.1, 1.0, value=0.20, step=0.05, label="Strength")
|
57 |
-
guidance = gr.Slider(1, 20, value=17.0, step=0.5, label="Guidance Scale")
|
58 |
-
|
59 |
-
run_btn = gr.Button("Generate")
|
60 |
-
|
61 |
-
def _save_to_state(img):
|
62 |
-
return {"step1": img} if img is not None else gr.skip()
|
63 |
-
|
64 |
-
event = (
|
65 |
-
run_btn.click(
|
66 |
-
fn=safe_generate_with_lora,
|
67 |
-
inputs=[input_image, prompt, negative_prompt, strength, guidance],
|
68 |
-
outputs=output_image,
|
69 |
-
)
|
70 |
-
.then(_save_to_state, output_image, shared_output)
|
71 |
-
.then(lambda x: x, input_image, original_input)
|
72 |
-
)
|
73 |
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
value="modern open-plan startup office background, natural lighting, glass walls, clean design, minimalistic decor"
|
83 |
-
)
|
84 |
-
inpaint_negative = gr.Textbox(
|
85 |
-
label="Negative Prompt",
|
86 |
-
value="dark lighting, cluttered background, fantasy elements, cartoon, anime, painting, low quality, distorted shapes"
|
87 |
-
)
|
88 |
-
|
89 |
-
with gr.Row():
|
90 |
-
inpaint_result = gr.Image(type="pil", label="Inpainted Image")
|
91 |
-
|
92 |
-
with gr.Row():
|
93 |
-
inpaint_btn = gr.Button("Remove Background & Inpaint", interactive=False)
|
94 |
-
|
95 |
-
def guarded_inpaint(img, prompt_bg, neg_bg):
|
96 |
-
if img is None:
|
97 |
-
return None, "**π Error:** No headshot found β please run Step 1 first."
|
98 |
-
|
99 |
-
try:
|
100 |
-
print("[DEBUG] Starting background removal and inpaintingβ¦", flush=True)
|
101 |
-
result = safe_run_background(img, prompt_bg, neg_bg)
|
102 |
-
return result, "" # Clear error on success
|
103 |
-
except gr.Error as e:
|
104 |
-
print(f"[Step 2 gr.Error] {e}", flush=True)
|
105 |
-
return None, f"**π Step 2 Failed:** {str(e)}"
|
106 |
-
except Exception as e:
|
107 |
-
print(f"[Step 2 UNEXPECTED ERROR] {type(e).__name__}: {e}", flush=True)
|
108 |
-
return None, f"**β Unexpected Error:** {type(e).__name__}: {e}"
|
109 |
-
|
110 |
-
inpaint_btn.click(
|
111 |
-
fn=guarded_inpaint,
|
112 |
-
inputs=[shared_output, inpaint_prompt, inpaint_negative],
|
113 |
-
outputs=[inpaint_result, error_box],
|
114 |
)
|
115 |
|
116 |
-
|
117 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
118 |
|
119 |
-
|
|
|
1 |
+
import os

# ββ Set cache/config dirs ββ
# Point Hugging Face and Ultralytics caches at the Space's persistent /data
# volume so downloaded weights survive container restarts. These environment
# variables must be set BEFORE the library imports below, which consult them
# when locating their cache directories — keep this ordering.
hf_home = "/data/.cache/huggingface"
yolo_cfg = "/data/ultralytics"
os.makedirs(hf_home, exist_ok=True)
os.makedirs(yolo_cfg, exist_ok=True)
os.environ["HF_HOME"] = hf_home
os.environ["YOLO_CONFIG_DIR"] = yolo_cfg

from ultralytics import YOLO
import numpy as np
import torch
from PIL import Image
import cv2
from diffusers import StableDiffusionXLInpaintPipeline
import gradio as gr
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
18 |
|
19 |
+
# ---- utils ----
def pil_to_cv2(pil_img):
    """Convert an RGB PIL image to an OpenCV-style BGR ndarray."""
    rgb_array = np.array(pil_img)
    return cv2.cvtColor(rgb_array, cv2.COLOR_RGB2BGR)
|
22 |
+
|
23 |
+
def cv2_to_pil(cv_img):
    """Convert a 3-channel OpenCV BGR ndarray back to an RGB PIL image."""
    rgb_array = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
    return Image.fromarray(rgb_array)
|
25 |
+
|
26 |
+
# ---- load models ----
# Both models are loaded once at import time so every request reuses the
# same weights (this Space runs on a single GPU).
# YOLOv8x-seg supplies instance-segmentation masks for the subject.
yolo = YOLO("yolov8x-seg.pt")
# SDXL inpainting pipeline in half precision on the GPU.
# NOTE(review): `use_auth_token` is deprecated in newer diffusers releases
# in favour of `token` — confirm against the pinned diffusers version.
inpaint_pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
    "diffusers/stable-diffusion-xl-1.0-inpainting-0.1",
    torch_dtype=torch.float16,
    use_safetensors=True,
    use_auth_token=os.getenv("HF_TOKEN")
).to("cuda")
|
34 |
+
|
35 |
+
# ---- processing logic ----
def run_background_removal_and_inpaint(image, prompt, negative_prompt):
    """Segment the foreground subject with YOLOv8-seg and repaint the background.

    Args:
        image: uploaded PIL image, or None.
        prompt: description of the desired new background.
        negative_prompt: text to steer the inpainting away from (may be falsy).

    Returns:
        A 1024x1024 PIL image with the background region inpainted.

    Raises:
        gr.Error: if no image was supplied or no subject mask was detected.
    """
    if image is None:
        raise gr.Error("Please upload an image.")

    img_cv = pil_to_cv2(image)
    results = yolo(img_cv)

    if not results or not results[0].masks or len(results[0].masks.data) == 0:
        raise gr.Error("No subject detected in the image. Please upload a clearer photo.")

    # Use the first instance mask only.
    # NOTE(review): this assumes the subject is detection #0 — may pick the
    # wrong object in multi-person shots; confirm.
    mask = results[0].masks.data[0].cpu().numpy()
    binary = (mask > 0.5).astype(np.uint8)
    background_mask = 1 - binary  # 1 = background, i.e. the region to repaint
    # Dilate so the inpainted region slightly overlaps the subject's edge,
    # avoiding a visible halo at the boundary.
    kernel = np.ones((15, 15), np.uint8)
    dilated = cv2.dilate(background_mask, kernel, iterations=1)
    inpaint_mask = (dilated * 255).astype(np.uint8)

    # Bug fix: the mask is single-channel, so cv2_to_pil() (which performs a
    # BGR->RGB colour conversion) raises on it. Build the greyscale mask
    # image directly, and resize with NEAREST so it stays hard-edged.
    mask_pil = Image.fromarray(inpaint_mask, mode="L").resize(
        (1024, 1024), resample=Image.NEAREST
    )
    img_pil = image.resize((1024, 1024)).convert("RGB")

    # SDXL inpainting expects a matching 1024x1024 image/mask pair.
    result = inpaint_pipe(
        prompt=prompt,
        negative_prompt=negative_prompt or "",
        image=img_pil,
        mask_image=mask_pil,
        guidance_scale=10,
        num_inference_steps=40
    ).images[0]

    return result
|
66 |
+
|
67 |
+
# ---- Gradio interface ----
with gr.Blocks() as demo:
    # Page header and one-line usage instructions.
    gr.Markdown("## πΌοΈ Remove & Replace Background")
    gr.Markdown("Upload a headshot, and describe the desired new background.")

    # Input photo on the left, inpainted result on the right.
    with gr.Row():
        input_img = gr.Image(type="pil", label="Upload Image")
        output_img = gr.Image(type="pil", label="Result")

    # Prompt pair steering the SDXL inpainting of the background region.
    with gr.Row():
        prompt = gr.Textbox(
            label="New Background Prompt",
            value="modern open-plan office, soft natural light, minimalistic decor"
        )
        neg_prompt = gr.Textbox(
            label="Negative Prompt",
            value="cartoon, fantasy, dark lighting, painting, anime"
        )

    # Markdown area the click handler fills with a formatted error message;
    # it receives an empty string on success, clearing any previous error.
    error_box = gr.Markdown()
|
87 |
+
|
88 |
+
def safe_run(img, prompt, neg_prompt):
|
89 |
+
try:
|
90 |
+
result = run_background_removal_and_inpaint(img, prompt, neg_prompt)
|
91 |
+
return result, ""
|
92 |
+
except Exception as e:
|
93 |
+
print(f"[ERROR] {type(e).__name__}: {e}")
|
94 |
+
return None, f"**β Error:** {type(e).__name__}: {e}"
|
95 |
+
|
96 |
+
    # Wire the button with two outputs so failures surface in error_box
    # instead of being swallowed.
    run_btn = gr.Button("Run Background Inpaint")
    run_btn.click(
        fn=safe_run,
        inputs=[input_img, prompt, neg_prompt],
        outputs=[output_img, error_box]
    )

# debug=True surfaces tracebacks in the Space logs.
demo.launch(debug=True)
|
app_new.py
ADDED
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
from inference import generate_with_lora
|
3 |
+
from background_edit import run_background_removal_and_inpaint
|
4 |
+
import traceback, torch, gc
|
5 |
+
|
6 |
+
# βββββββββββββββββββββ Helpers βββββββββββββββββββββ
|
7 |
+
def _print_trace():
|
8 |
+
traceback.print_exc()
|
9 |
+
|
10 |
+
def safe_generate_with_lora(*args, **kwargs):
    """Run Step-1 generation, logging tracebacks and surfacing failures as gr.Error."""
    try:
        return generate_with_lora(*args, **kwargs)
    except gr.Error:
        # Already user-facing: log the trace and let Gradio display it as-is.
        _print_trace()
        raise
    except Exception as e:
        # Anything else gets wrapped so the UI shows a readable message.
        _print_trace()
        raise gr.Error(f"Image generation failed: {e}")
|
19 |
+
|
20 |
+
def unload_models():
    """Free as much GPU memory as possible before loading the next pipeline.

    Fix: run the Python garbage collector FIRST so unreferenced tensors are
    actually destroyed, then ask PyTorch to return its now-unused cached CUDA
    blocks to the driver. The original order (empty_cache before collect)
    could leave blocks still held by garbage tensors in the cache.
    """
    gc.collect()
    torch.cuda.empty_cache()
|
23 |
+
|
24 |
+
def safe_run_background(*args, **kwargs):
    """Free VRAM, then run Step-2 background replacement, wrapping failures in gr.Error."""
    try:
        unload_models()  # free VRAM before loading the inpainting model
        return run_background_removal_and_inpaint(*args, **kwargs)
    except Exception as e:
        # Log server-side and re-raise as a user-visible, labelled error.
        _print_trace()
        raise gr.Error(f"[Step 2] Background replacement failed: {type(e).__name__}: {e}")
|
31 |
+
|
32 |
+
# βββββββββββββββββββββ UI βββββββββββββββββββββ
# NOTE(review): these gr.State() objects are created at module level, OUTSIDE
# the gr.Blocks() context that later wires them into events — confirm the
# installed Gradio version attaches out-of-context components correctly.
shared_output = gr.State()    # holds the Step 1 output image
original_input = gr.State()   # holds the original upload (optional)
|
35 |
+
|
36 |
+
with gr.Blocks() as demo:
    demo.queue()  # enable batching / concurrency

    # βββββββββββ STEP 1: Headshot Refinement βββββββββββ
    with gr.Tab("Step 1: Headshot Refinement"):
        with gr.Row():
            input_image = gr.Image(type="pil", label="Upload Headshot")
            output_image = gr.Image(type="pil", label="Refined Output")

        with gr.Row():
            prompt = gr.Textbox(
                label="Prompt",
                value="a professional corporate headshot of a confident woman in her 30s with blonde hair"
            )
            negative_prompt = gr.Textbox(
                label="Negative Prompt",
                value="deformed, cartoon, anime, illustration, painting, drawing, sketch, low resolution, blurry, out of focus, pixelated"
            )

        with gr.Row():
            strength = gr.Slider(0.1, 1.0, value=0.20, step=0.05, label="Strength")
            guidance = gr.Slider(1, 20, value=17.0, step=0.5, label="Guidance Scale")

        run_btn = gr.Button("Generate")

        def _save_to_state(img):
            # Wrap the image in a dict; a None result is skipped so an
            # earlier successful Step-1 output is not clobbered.
            return {"step1": img} if img is not None else gr.skip()

        # Chain: generate -> stash result in shared_output -> stash upload.
        event = (
            run_btn.click(
                fn=safe_generate_with_lora,
                inputs=[input_image, prompt, negative_prompt, strength, guidance],
                outputs=output_image,
            )
            .then(_save_to_state, output_image, shared_output)
            .then(lambda x: x, input_image, original_input)
        )

    # βββββββββββ STEP 2: Background Replacement βββββββββββ
    with gr.Tab("Step 2: Replace Background"):
        # Show formatted error messages
        error_box = gr.Markdown(value="", visible=True)

        with gr.Row():
            inpaint_prompt = gr.Textbox(
                label="New Background Prompt",
                value="modern open-plan startup office background, natural lighting, glass walls, clean design, minimalistic decor"
            )
            inpaint_negative = gr.Textbox(
                label="Negative Prompt",
                value="dark lighting, cluttered background, fantasy elements, cartoon, anime, painting, low quality, distorted shapes"
            )

        with gr.Row():
            inpaint_result = gr.Image(type="pil", label="Inpainted Image")

        with gr.Row():
            inpaint_btn = gr.Button("Remove Background & Inpaint", interactive=False)

        def guarded_inpaint(img, prompt_bg, neg_bg):
            # Bug fix: shared_output holds the {"step1": image} dict built by
            # _save_to_state, NOT the bare image — unwrap it first so the
            # inpainting pipeline receives an actual PIL image.
            if isinstance(img, dict):
                img = img.get("step1")
            if img is None:
                return None, "**π Error:** No headshot found β please run Step 1 first."

            try:
                print("[DEBUG] Starting background removal and inpaintingβ¦", flush=True)
                result = safe_run_background(img, prompt_bg, neg_bg)
                return result, ""  # Clear error on success
            except gr.Error as e:
                # safe_run_background already logged the traceback.
                print(f"[Step 2 gr.Error] {e}", flush=True)
                return None, f"**π Step 2 Failed:** {str(e)}"
            except Exception as e:
                print(f"[Step 2 UNEXPECTED ERROR] {type(e).__name__}: {e}", flush=True)
                return None, f"**β Unexpected Error:** {type(e).__name__}: {e}"

        inpaint_btn.click(
            fn=guarded_inpaint,
            inputs=[shared_output, inpaint_prompt, inpaint_negative],
            outputs=[inpaint_result, error_box],
        )

    # Enable Step 2 after Step 1 completes
    event.then(lambda: gr.update(interactive=True), None, inpaint_btn)

demo.launch(debug=True)
|