aifeifei798 committed on
Commit
2fe06d5
·
verified ·
1 Parent(s): e2e0459

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -21
app.py CHANGED
@@ -17,7 +17,7 @@ def feifeimodload():
17
  device = "cuda" if torch.cuda.is_available() else "cpu"
18
 
19
  pipe = DiffusionPipeline.from_pretrained(
20
- "black-forest-labs/FLUX.1-schnell", torch_dtype=dtype
21
  ).to(device)
22
 
23
  pipe.load_lora_weights(
@@ -25,20 +25,8 @@ def feifeimodload():
25
  adapter_name="feifei",
26
  )
27
 
28
- pipe.set_adapters(
29
- ["feifei"],
30
- adapter_weights=[0.75],
31
- )
32
-
33
- pipe.fuse_lora(
34
- adapter_name=["feifei"],
35
- lora_scale=1.0,
36
- )
37
-
38
- #pipe.enable_sequential_cpu_offload()
39
  pipe.vae.enable_slicing()
40
  pipe.vae.enable_tiling()
41
- pipe.unload_lora_weights()
42
  torch.cuda.empty_cache()
43
  return pipe
44
 
@@ -47,15 +35,20 @@ MAX_SEED = np.iinfo(np.int32).max
47
  MAX_IMAGE_SIZE = 2048
48
 
49
  @spaces.GPU()
50
- def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, num_inference_steps=4, progress=gr.Progress(track_tqdm=True)):
51
  if randomize_seed:
52
  seed = random.randint(0, MAX_SEED)
53
  generator = torch.Generator().manual_seed(seed)
54
- # prompt = f"feifei, real model girl in real life, {prompt}, slight smile, Master of Light and Shadow."
55
- # prompt = re.sub("young woman", "sexy feifei" ,prompt)
56
- # prompt = re.sub("woman", "sexy feifei" ,prompt)
57
- # prompt = re.sub("girl", "sexy feifei" ,prompt)
58
- # prompt = re.sub("model", "sexy feifei" ,prompt)
 
 
 
 
 
59
  image = pipe(
60
  prompt = prompt,
61
  width = width,
@@ -140,7 +133,6 @@ with gr.Blocks(css=css) as demo:
140
 
141
  with gr.Row():
142
 
143
-
144
  num_inference_steps = gr.Slider(
145
  label="Number of inference steps",
146
  minimum=1,
@@ -148,6 +140,15 @@ with gr.Blocks(css=css) as demo:
148
  step=1,
149
  value=4,
150
  )
 
 
 
 
 
 
 
 
 
151
 
152
  gr.Examples(
153
  examples = examples,
@@ -160,7 +161,7 @@ with gr.Blocks(css=css) as demo:
160
  gr.on(
161
  triggers=[run_button.click, prompt.submit],
162
  fn = infer,
163
- inputs = [prompt, seed, randomize_seed, width, height, num_inference_steps],
164
  outputs = [result, seed]
165
  )
166
 
 
17
  device = "cuda" if torch.cuda.is_available() else "cpu"
18
 
19
  pipe = DiffusionPipeline.from_pretrained(
20
+ "aifeifei798/DarkIdol-flux-v1", torch_dtype=dtype
21
  ).to(device)
22
 
23
  pipe.load_lora_weights(
 
25
  adapter_name="feifei",
26
  )
27
 
 
 
 
 
 
 
 
 
 
 
 
28
  pipe.vae.enable_slicing()
29
  pipe.vae.enable_tiling()
 
30
  torch.cuda.empty_cache()
31
  return pipe
32
 
 
35
  MAX_IMAGE_SIZE = 2048
36
 
37
  @spaces.GPU()
38
+ def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, num_inference_steps=4, num_feifei=0.35, progress=gr.Progress(track_tqdm=True)):
39
  if randomize_seed:
40
  seed = random.randint(0, MAX_SEED)
41
  generator = torch.Generator().manual_seed(seed)
42
+
43
+ pipe.set_adapters(
44
+ ["feifei"],
45
+ adapter_weights=[num_feifei],
46
+ )
47
+
48
+ pipe.fuse_lora(
49
+ adapter_name=["feifei"],
50
+ lora_scale=1.0,
51
+ )
52
  image = pipe(
53
  prompt = prompt,
54
  width = width,
 
133
 
134
  with gr.Row():
135
 
 
136
  num_inference_steps = gr.Slider(
137
  label="Number of inference steps",
138
  minimum=1,
 
140
  step=1,
141
  value=4,
142
  )
143
+
144
+ with gr.Row():
145
+ num_feifei = gr.Slider(
146
+ label="FeiFei",
147
+ minimum=0,
148
+ maximum=2,
149
+ step=0.05,
150
+ value=0.35,
151
+ )
152
 
153
  gr.Examples(
154
  examples = examples,
 
161
  gr.on(
162
  triggers=[run_button.click, prompt.submit],
163
  fn = infer,
164
+ inputs = [prompt, seed, randomize_seed, width, height, num_inference_steps, num_feifei],
165
  outputs = [result, seed]
166
  )
167