Update app.py
app.py CHANGED
@@ -3,7 +3,6 @@
 import gradio as gr
 from PIL import Image
 import torch
-
 from diffusers import StableDiffusionXLImg2ImgPipeline
 from utils.planner import (
     extract_scene_plan,
@@ -12,13 +11,13 @@ from utils.planner import (
 )

 # ----------------------------
-#
+# 🖥️ Device Setup
 # ----------------------------
 device = "cuda" if torch.cuda.is_available() else "cpu"
 dtype = torch.float16 if device == "cuda" else torch.float32

 # ----------------------------
-#
+# 🧠 Load SDXL Img2Img Pipeline (no ControlNet)
 # ----------------------------
 pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
     "stabilityai/stable-diffusion-xl-base-1.0",
@@ -31,38 +30,38 @@ pipe.enable_model_cpu_offload()
 pipe.enable_attention_slicing()

 # ----------------------------
-#
+# 🎯 Image Generation Function
 # ----------------------------
 def process_image(prompt, image, num_variations):
     try:
-        print("🧠
+        print("🧠 Prompt received:", prompt)
         if image is None:
-            raise ValueError("🚫
+            raise ValueError("🚫 Please upload an image.")

-        # Step 1:
+        # Step 1: Analyze image + prompt → structured plan
         scene_plan = extract_scene_plan(prompt, image)
         print("📋 Scene Plan:", scene_plan)

-        # Step 2: Generate
-
-        print("✅ Enriched Prompts:",
+        # Step 2: Generate prompt variations
+        enriched_prompts = generate_prompt_variations_from_scene(scene_plan, prompt, num_variations)
+        print("✅ Enriched Prompts:", enriched_prompts)

         # Step 3: Generate negative prompt
         negative_prompt = generate_negative_prompt_from_scene(scene_plan)
         print("🚫 Negative Prompt:", negative_prompt)

-        # Step 4: Resize image to
+        # Step 4: Resize image to 1024x1024
         image = image.resize((1024, 1024)).convert("RGB")

-        # Step 5: Generate
+        # Step 5: Generate variations
         outputs = []
-        for i, enriched_prompt in enumerate(
-            print(f"
+        for i, enriched_prompt in enumerate(enriched_prompts):
+            print(f"✨ Generating Image {i + 1}...")
             result = pipe(
                 prompt=enriched_prompt,
                 negative_prompt=negative_prompt,
                 image=image,
-                strength=0.7,
+                strength=0.7,
                 guidance_scale=7.5,
                 num_inference_steps=30,
             )
@@ -71,7 +70,7 @@ def process_image(prompt, image, num_variations):
         return outputs

     except Exception as e:
-        print("❌ Generation
+        print("❌ Generation failed:", e)
         return [Image.new("RGB", (512, 512), color="red")]

 # ----------------------------
@@ -84,10 +83,14 @@ demo = gr.Interface(
         gr.Image(type="pil", label="Product Image"),
         gr.Slider(1, 5, value=3, step=1, label="Number of Variations")
     ],
-
+    outputs=gr.Gallery(label="Generated Images"),
     title="NewCrux Product Image Generator (SDXL Only)",
-    description="Upload a product image and
+    description="Upload a product image and describe your vision. NewCrux will generate multiple enriched image variations using SDXL + AI prompt planning."
 )

+# ----------------------------
+# 🚀 Launch
+# ----------------------------
 if __name__ == "__main__":
     demo.launch()
+
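Note: app.py imports extract_scene_plan, generate_prompt_variations_from_scene, and generate_negative_prompt_from_scene from utils.planner, which is not touched by this commit. For running the Space locally without that module, a minimal stand-in could look like the sketch below; the function names and signatures are taken from app.py, but the bodies are placeholder assumptions, not the actual NewCrux planner logic.

# utils/planner.py: hypothetical stub for local testing only (assumption, not the real planner)

def extract_scene_plan(prompt, image):
    # Assumed shape: a small dict describing the scene; the real planner
    # presumably analyzes the image and prompt with an AI model.
    return {"subject": prompt, "image_size": image.size}

def generate_prompt_variations_from_scene(scene_plan, prompt, num_variations):
    # Assumed behavior: return num_variations enriched prompt strings.
    return [f"{prompt}, professional product photo, variation {i + 1}"
            for i in range(int(num_variations))]

def generate_negative_prompt_from_scene(scene_plan):
    # Generic negative prompt; the real version is presumably scene-aware.
    return "blurry, low quality, distorted, watermark, text"

With stubs like these in place, process_image can be exercised end to end, e.g. process_image("studio shot of a ceramic mug", Image.open("mug.png"), 2), before wiring in the real planner.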
|