Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
@@ -9,6 +9,15 @@ import torch
|
|
9 |
from diffusers import DiffusionPipeline
|
10 |
from PIL import Image
|
11 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
12 |
# Create permanent storage directory
|
13 |
SAVE_DIR = "saved_images" # Gradio will handle the persistence
|
14 |
if not os.path.exists(SAVE_DIR):
|
@@ -18,7 +27,10 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
|
|
18 |
repo_id = "black-forest-labs/FLUX.1-dev"
|
19 |
adapter_id = "seawolf2357/nsfw-detection" # Changed to Renoir model
|
20 |
|
|
|
|
|
21 |
pipeline = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch.bfloat16)
|
|
|
22 |
pipeline.load_lora_weights(adapter_id)
|
23 |
pipeline = pipeline.to(device)
|
24 |
|
@@ -64,6 +76,21 @@ def load_predefined_images():
|
|
64 |
]
|
65 |
return predefined_images
|
66 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
67 |
@spaces.GPU(duration=120)
|
68 |
def inference(
|
69 |
prompt: str,
|
@@ -76,12 +103,15 @@ def inference(
|
|
76 |
lora_scale: float,
|
77 |
progress: gr.Progress = gr.Progress(track_tqdm=True),
|
78 |
):
|
|
|
|
|
|
|
79 |
if randomize_seed:
|
80 |
seed = random.randint(0, MAX_SEED)
|
81 |
generator = torch.Generator(device=device).manual_seed(seed)
|
82 |
|
83 |
image = pipeline(
|
84 |
-
prompt=prompt,
|
85 |
guidance_scale=guidance_scale,
|
86 |
num_inference_steps=num_inference_steps,
|
87 |
width=width,
|
@@ -91,18 +121,18 @@ def inference(
|
|
91 |
).images[0]
|
92 |
|
93 |
# Save the generated image
|
94 |
-
filepath = save_generated_image(image, prompt)
|
95 |
|
96 |
# Return the image, seed, and updated gallery
|
97 |
-
return image, seed, load_generated_images()
|
98 |
|
99 |
examples = [
|
100 |
-
"
|
101 |
-
"
|
102 |
-
"
|
103 |
-
"
|
104 |
-
"
|
105 |
-
"
|
106 |
]
|
107 |
|
108 |
# Brighter custom CSS with vibrant colors
|
@@ -157,7 +187,7 @@ button:hover {
|
|
157 |
"""
|
158 |
|
159 |
with gr.Blocks(css=custom_css, analytics_enabled=False) as demo:
|
160 |
-
gr.HTML('<div class="title">
|
161 |
|
162 |
# Model description with the requested content
|
163 |
|
@@ -169,12 +199,13 @@ with gr.Blocks(css=custom_css, analytics_enabled=False) as demo:
|
|
169 |
label="Prompt",
|
170 |
show_label=False,
|
171 |
max_lines=1,
|
172 |
-
placeholder="Enter your prompt (
|
173 |
container=False,
|
174 |
)
|
175 |
run_button = gr.Button("Generate", variant="primary", scale=0)
|
176 |
|
177 |
result = gr.Image(label="Result", show_label=False)
|
|
|
178 |
|
179 |
with gr.Accordion("Advanced Settings", open=False):
|
180 |
seed = gr.Slider(
|
@@ -228,7 +259,7 @@ with gr.Blocks(css=custom_css, analytics_enabled=False) as demo:
|
|
228 |
gr.Examples(
|
229 |
examples=examples,
|
230 |
inputs=[prompt],
|
231 |
-
outputs=[result, seed],
|
232 |
)
|
233 |
|
234 |
with gr.Tab("Gallery"):
|
@@ -278,7 +309,7 @@ with gr.Blocks(css=custom_css, analytics_enabled=False) as demo:
|
|
278 |
num_inference_steps,
|
279 |
lora_scale,
|
280 |
],
|
281 |
-
outputs=[result, seed, generated_gallery],
|
282 |
)
|
283 |
|
284 |
demo.queue()
|
|
|
9 |
from diffusers import DiffusionPipeline
|
10 |
from PIL import Image
|
11 |
|
12 |
+
# Make sure PEFT is installed
|
13 |
+
try:
|
14 |
+
import peft
|
15 |
+
except ImportError:
|
16 |
+
import subprocess
|
17 |
+
print("Installing PEFT library...")
|
18 |
+
subprocess.check_call(["pip", "install", "peft"])
|
19 |
+
import peft
|
20 |
+
|
21 |
# Create permanent storage directory
|
22 |
SAVE_DIR = "saved_images" # Gradio will handle the persistence
|
23 |
if not os.path.exists(SAVE_DIR):
|
|
|
27 |
repo_id = "black-forest-labs/FLUX.1-dev"
|
28 |
adapter_id = "seawolf2357/nsfw-detection" # Changed to Renoir model
|
29 |
|
30 |
+
# Initialize pipeline with PEFT support
|
31 |
+
print("Loading pipeline...")
|
32 |
pipeline = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch.bfloat16)
|
33 |
+
print("Loading LoRA weights...")
|
34 |
pipeline.load_lora_weights(adapter_id)
|
35 |
pipeline = pipeline.to(device)
|
36 |
|
|
|
76 |
]
|
77 |
return predefined_images
|
78 |
|
79 |
+
# Function to ensure "nsfw" and "[trigger]" are in the prompt
|
80 |
+
def process_prompt(prompt):
|
81 |
+
# Add "nsfw" prefix if not already present
|
82 |
+
if not prompt.lower().startswith("nsfw "):
|
83 |
+
prompt = "nsfw " + prompt
|
84 |
+
|
85 |
+
# Add "[trigger]" suffix if not already present
|
86 |
+
if not prompt.lower().endswith("[trigger]"):
|
87 |
+
if prompt.endswith(" "):
|
88 |
+
prompt = prompt + "[trigger]"
|
89 |
+
else:
|
90 |
+
prompt = prompt + " [trigger]"
|
91 |
+
|
92 |
+
return prompt
|
93 |
+
|
94 |
@spaces.GPU(duration=120)
|
95 |
def inference(
|
96 |
prompt: str,
|
|
|
103 |
lora_scale: float,
|
104 |
progress: gr.Progress = gr.Progress(track_tqdm=True),
|
105 |
):
|
106 |
+
# Process the prompt to ensure it has the required format
|
107 |
+
processed_prompt = process_prompt(prompt)
|
108 |
+
|
109 |
if randomize_seed:
|
110 |
seed = random.randint(0, MAX_SEED)
|
111 |
generator = torch.Generator(device=device).manual_seed(seed)
|
112 |
|
113 |
image = pipeline(
|
114 |
+
prompt=processed_prompt,
|
115 |
guidance_scale=guidance_scale,
|
116 |
num_inference_steps=num_inference_steps,
|
117 |
width=width,
|
|
|
121 |
).images[0]
|
122 |
|
123 |
# Save the generated image
|
124 |
+
filepath = save_generated_image(image, processed_prompt)
|
125 |
|
126 |
# Return the image, seed, and updated gallery
|
127 |
+
return image, seed, processed_prompt, load_generated_images()
|
128 |
|
129 |
examples = [
|
130 |
+
"painting of a lively outdoor dance scene at Moulin de la Galette, with dappled sunlight filtering through trees, illuminating well-dressed Parisians enjoying a summer afternoon. Couples dance while others socialize at tables, capturing the joie de vivre of 1870s Montmartre.",
|
131 |
+
"intimate portrait of a young woman with rosy cheeks and lips, soft blonde hair, and a gentle smile. She wears a vibrant blue dress against a background of lush flowers and greenery, showcasing his mastery of depicting feminine beauty with warm, luminous skin tones.",
|
132 |
+
"painting of two young girls seated at a piano, captured in his distinctive soft focus style. The scene shows one girl playing while the other stands beside her, both wearing delicate white dresses. The interior setting features warm colors and loose brushwork typical of his mature period.",
|
133 |
+
"painting of an elegant boating party, with fashionably dressed men and women relaxing on a restaurant terrace overlooking the Seine. The scene captures the leisurely atmosphere of 1880s French society, with sparkling water reflections and a bright, airy palette of blues, whites, and warm flesh tones.",
|
134 |
+
"painting of a sun-dappled garden scene with children playing. The composition features vibrant flowers in full bloom, lush greenery, and Renoir's characteristic luminous treatment of sunlight filtering through foliage, creating patches of brilliant color across the canvas.",
|
135 |
+
"depiction of bathers by a riverbank, with several female figures arranged in a harmonious composition. The painting showcases his later style with fuller figures rendered in pearlescent flesh tones against a backdrop of shimmering water and verdant landscape, demonstrating his unique approach to the nude figure in nature."
|
136 |
]
|
137 |
|
138 |
# Brighter custom CSS with vibrant colors
|
|
|
187 |
"""
|
188 |
|
189 |
with gr.Blocks(css=custom_css, analytics_enabled=False) as demo:
|
190 |
+
gr.HTML('<div class="title">NSFW Detection STUDIO</div>')
|
191 |
|
192 |
# Model description with the requested content
|
193 |
|
|
|
199 |
label="Prompt",
|
200 |
show_label=False,
|
201 |
max_lines=1,
|
202 |
+
placeholder="Enter your prompt (nsfw and [trigger] will be added automatically)",
|
203 |
container=False,
|
204 |
)
|
205 |
run_button = gr.Button("Generate", variant="primary", scale=0)
|
206 |
|
207 |
result = gr.Image(label="Result", show_label=False)
|
208 |
+
processed_prompt_display = gr.Textbox(label="Processed Prompt", show_label=True)
|
209 |
|
210 |
with gr.Accordion("Advanced Settings", open=False):
|
211 |
seed = gr.Slider(
|
|
|
259 |
gr.Examples(
|
260 |
examples=examples,
|
261 |
inputs=[prompt],
|
262 |
+
outputs=[result, seed, processed_prompt_display],
|
263 |
)
|
264 |
|
265 |
with gr.Tab("Gallery"):
|
|
|
309 |
num_inference_steps,
|
310 |
lora_scale,
|
311 |
],
|
312 |
+
outputs=[result, seed, processed_prompt_display, generated_gallery],
|
313 |
)
|
314 |
|
315 |
demo.queue()
|