Spaces: Running on Zero

Update app.py
app.py CHANGED
@@ -9,7 +9,10 @@ from diffusers import (
     AutoencoderTiny,
 )
 from huggingface_hub import hf_hub_download
-
+import config
+styles_name = [style["name"] for style in config.style_list]
+MAX_SEED = np.iinfo(np.int32).max
+MAX_IMAGE_SIZE = 2048
 
 def feifeimodload():
 
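The config module imported above is not part of this commit. From the way it is used (each entry's "name" feeds the Styles dropdown, and its "prompt" is a template whose "{prompt}" placeholder infer() fills in), a minimal sketch of what config.py might contain — the style entries here are illustrative, not the Space's actual styles:

# config.py — hypothetical sketch; the real file is not shown in this commit.
# Each style needs a "name" (listed in the Styles dropdown) and a "prompt"
# template whose "{prompt}" placeholder is replaced with the user's prompt.
style_list = [
    {
        "name": "(None)",
        "prompt": "{prompt}",  # identity template: leaves the prompt unchanged
    },
    {
        "name": "Cinematic",
        "prompt": "cinematic still, {prompt}, shallow depth of field, film grain",
    },
]

Note that the string "(None)" is truthy, so the if styles_Radio: branch added to infer() below runs even for the default selection; an identity entry like the one above is what keeps that branch a no-op.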
@@ -36,11 +39,9 @@ def feifeimodload():
     return pipe
 
 pipe = feifeimodload()
-MAX_SEED = np.iinfo(np.int32).max
-MAX_IMAGE_SIZE = 2048
 
 @spaces.GPU()
-def infer(prompt="", Expansion_select=False, seed=42, randomize_seed=False, width=1024, height=1024, num_inference_steps=4, guidancescale=3.5, num_feifei=0.35, nsfw_select=False, nsfw_slider=1, progress=gr.Progress(track_tqdm=True)):
+def infer(prompt="", styles_Radio="(None)", Expansion_select=False, seed=42, randomize_seed=False, width=1024, height=1024, num_inference_steps=4, guidancescale=3.5, num_feifei=0.35, nsfw_select=False, nsfw_slider=1, progress=gr.Progress(track_tqdm=True)):
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
     generator = torch.Generator().manual_seed(seed)
@@ -73,7 +74,11 @@ def infer(prompt="", Expansion_select=False, seed=42, randomize_seed=False, widt
     prompt = re.sub(r"young woman", replacement, prompt, flags=re.IGNORECASE)
     prompt = re.sub(r"woman", replacement, prompt, flags=re.IGNORECASE)
     prompt = re.sub(r"model", replacement, prompt, flags=re.IGNORECASE)
-
+    if styles_Radio:
+        style_name = styles_Radio
+        for style in config.style_list:
+            if style["name"] == style_name:
+                prompt = style["prompt"].replace("{prompt}", prompt)
     image = pipe(
         prompt = "flux, 8k, ",
         prompt_2 = prompt,
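Pulled out of infer(), the style lookup added above reduces to a few lines. A standalone sketch, runnable on its own (the style entry is made up for illustration):

# Hypothetical demo of the lookup-and-substitute step in infer().
style_list = [
    {"name": "Cinematic", "prompt": "cinematic still, {prompt}, film grain"},
]

prompt = "a red fox in the snow"
styles_Radio = "Cinematic"  # value the Styles dropdown would pass in

if styles_Radio:
    style_name = styles_Radio
    for style in style_list:
        if style["name"] == style_name:
            prompt = style["prompt"].replace("{prompt}", prompt)

print(prompt)  # -> cinematic still, a red fox in the snow, film grain

Using str.replace rather than str.format means any other literal braces in a template are left untouched, and a style name missing from config.style_list simply leaves the prompt as-is.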
@@ -109,15 +114,22 @@ with gr.Blocks(css=css) as demo:
 
     with gr.Column(elem_id="col-container"):
         gr.Markdown(f"""# FLUX.1 + feifei-flux-lora """)
-
-
-
-
-
-
-
-
-
+        with gr.Row():
+            prompt = gr.Text(
+                label="Prompt",
+                show_label=False,
+                max_lines=12,
+                placeholder="Enter your prompt",
+                value="",
+                container=False,
+            )
+        with gr.Row():
+            styles_Radio = gr.Dropdown(
+                styles_name,
+                label="Styles",
+                multiselect=False,
+                value="(None)",
+            )
         run_button = gr.Button("Run")
         result = gr.Image(label="Result", show_label=False,height=520)
         Expansion_select = gr.Checkbox(label="FeiFei Expansion")
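With multiselect=False, gr.Dropdown hands the callback a single string (the selected choice), which is what the styles_Radio="(None)" default in infer() expects. A self-contained sketch of that behavior (a hypothetical demo, not code from this Space):

import gradio as gr

styles_name = ["(None)", "Cinematic"]  # illustrative choices

def show(choice):
    # choice is a plain str because multiselect=False
    return f"selected: {choice}"

with gr.Blocks() as demo:
    dd = gr.Dropdown(styles_name, label="Styles", multiselect=False, value="(None)")
    out = gr.Textbox()
    dd.change(fn=show, inputs=dd, outputs=out)

demo.launch()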
@@ -200,7 +212,7 @@ with gr.Blocks(css=css) as demo:
     gr.on(
         triggers=[run_button.click, prompt.submit],
         fn = infer,
-        inputs = [prompt, Expansion_select, seed, randomize_seed, width, height, num_inference_steps, guidancescale, num_feifei, nsfw_select, nsfw_slider],
+        inputs = [prompt, styles_Radio, Expansion_select, seed, randomize_seed, width, height, num_inference_steps, guidancescale, num_feifei, nsfw_select, nsfw_slider],
         outputs = [result, seed]
     )
 
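Gradio passes the components in inputs to fn positionally, so this list has to stay in the same order as infer()'s parameters — which is why styles_Radio is inserted second both in the signature and here. A sketch of a guard one could add (not part of this commit):

import inspect

# Hypothetical sanity check: keep the gr.on inputs order in sync with infer().
expected = ["prompt", "styles_Radio", "Expansion_select", "seed", "randomize_seed",
            "width", "height", "num_inference_steps", "guidancescale",
            "num_feifei", "nsfw_select", "nsfw_slider"]
actual = list(inspect.signature(infer).parameters)[:len(expected)]
assert actual == expected, f"gr.on inputs out of sync with infer(): {actual}"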