Update app.py

app.py — CHANGED

```diff
@@ -33,13 +33,19 @@ def infer(
     num_inference_steps,
     progress=gr.Progress(track_tqdm=True),
 ):
+    # Add the hidden pre-prompt
+    pre_prompt = f"2d emoji of {prompt} simplistic"
+
+    # Use the modified prompt
+    combined_prompt = pre_prompt
+
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
     generator = torch.Generator().manual_seed(seed)
 
     image = pipe(
-        prompt=prompt,
+        prompt=combined_prompt,
         negative_prompt=negative_prompt,
         guidance_scale=guidance_scale,
         num_inference_steps=num_inference_steps,
@@ -51,6 +57,7 @@ def infer(
     return image, seed
 
 
+
 examples = [
     "a golden retriever",
     "a cardinal bird",
```