prithivMLmods committed (verified)
Commit 2ef0c2a · 1 parent: af3edc7

Update app.py

Files changed (1): app.py (+231 -138)
app.py CHANGED
@@ -2,52 +2,115 @@ import os
  import random
  import uuid
  import json
  import gradio as gr
  import numpy as np
  from PIL import Image
  import spaces
  import torch
  from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler

- css = '''
- .gradio-container {
-   max-width: 150%;
-   margin: 0 auto;
- }
- h1 { text-align: center; }
- footer { visibility: hidden; }
- '''

- examples = [
-     "3d image, cute girl, in the style of Pixar --ar 1:2 --stylize 750, 4K resolution highlights, Sharp focus, octane render, ray tracing, Ultra-High-Definition, 8k, UHD, HDR, (Masterpiece:1.5), (best quality:1.5)",
-     "A glass cup of cold coffee placed on a rustic wooden table, surrounded by soft morning light. The coffee is rich, dark, and topped with a light layer of creamy froth, droplets of condensation sliding down the glass.",
-     "Vector illustration of a horse, vector graphic design with flat colors on an brown background in the style of vector art, using simple shapes and graphics with simple details, professionally designed as a tshirt logo ready for print on a white background. --ar 89:82 --v 6.0 --style raw",
-     "Man in brown leather jacket posing for camera, in the style of sleek and stylized, clockpunk, subtle shades, exacting precision, ferrania p30 --ar 67:101 --v 5",
-     "Commercial photography, giant burger, white lighting, studio light, 8k octane rendering, high resolution photography, insanely detailed, fine details, on white isolated plain, 8k, commercial photography, stock photo, professional color grading, --v 4 --ar 9:16 "
  ]

- MODEL_ID = os.getenv("MODEL_VAL_PATH")  # SG161222/RealVisXL_V5.0_Lightning or SG161222/RealVisXL_V4.0_Lightning
- MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))
  USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
  ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
- BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1"))

  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
- pipe = StableDiffusionXLPipeline.from_pretrained(
-     MODEL_ID,
-     torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
-     use_safetensors=True,
-     add_watermarker=False,
- ).to(device)
- pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
-
- if USE_TORCH_COMPILE:
-     pipe.compile()
-
- if ENABLE_CPU_OFFLOAD:
-     pipe.enable_model_cpu_offload()
-
- MAX_SEED = np.iinfo(np.int32).max

  def save_image(img):
      unique_name = str(uuid.uuid4()) + ".png"
@@ -59,137 +122,168 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
      seed = random.randint(0, MAX_SEED)
      return seed

- @spaces.GPU
  def generate(
      prompt: str,
      negative_prompt: str = "",
      use_negative_prompt: bool = False,
-     seed: int = 1,
      width: int = 1024,
      height: int = 1024,
      guidance_scale: float = 3,
-     num_inference_steps: int = 25,
      randomize_seed: bool = False,
      use_resolution_binning: bool = True,
-     num_images: int = 10,
      progress=gr.Progress(track_tqdm=True),
  ):
      seed = int(randomize_seed_fn(seed, randomize_seed))
-     generator = torch.Generator(device=device).manual_seed(seed)

      options = {
-         "prompt": [prompt] * num_images,
-         "negative_prompt": [negative_prompt] * num_images if use_negative_prompt else None,
          "width": width,
          "height": height,
          "guidance_scale": guidance_scale,
-         "num_inference_steps": num_inference_steps,
          "generator": generator,
          "output_type": "pil",
      }
-
-     if use_resolution_binning:
-         options["use_resolution_binning"] = True
-
-     images = []
-     for i in range(0, num_images, BATCH_SIZE):
-         batch_options = options.copy()
-         batch_options["prompt"] = options["prompt"][i:i+BATCH_SIZE]
-         if "negative_prompt" in batch_options:
-             batch_options["negative_prompt"] = options["negative_prompt"][i:i+BATCH_SIZE]
-         images.extend(pipe(**batch_options).images)

      image_paths = [save_image(img) for img in images]
      return image_paths, seed

- with gr.Blocks(css=css) as demo:
-     gr.Markdown("## SDXL:Text to Image [10-Images]")
-
-     with gr.Row():
-         with gr.Column(scale=1):
-             with gr.Row():
-                 prompt = gr.Text(
-                     show_label=False,
-                     max_lines=1,
-                     placeholder="Enter your prompt",
-                     container=False,
-                 )
-                 run_button = gr.Button("Run", scale=0, variant="primary")
-
-             result = gr.Gallery(show_label=False, format="png", columns=2, object_fit="contain")
-
-             with gr.Accordion("Advanced Settings", open=False):
-                 num_images = gr.Slider(
-                     label="Number of Images",
-                     minimum=1,
-                     maximum=15,
-                     step=1,
-                     value=10,
-                 )
-                 with gr.Row():
-                     use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True)
-                     negative_prompt = gr.Text(
-                         label="Negative prompt",
-                         max_lines=5,
-                         lines=4,
-                         placeholder="Enter a negative prompt",
-                         value="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
-                         visible=True,
-                     )
-                 seed = gr.Slider(
-                     label="Seed",
-                     minimum=0,
-                     maximum=MAX_SEED,
-                     step=1,
-                     value=0,
-                 )
-                 randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
-                 with gr.Row(visible=True):
-                     width = gr.Slider(
-                         label="Width",
-                         minimum=512,
-                         maximum=MAX_IMAGE_SIZE,
-                         step=64,
-                         value=1024,
-                     )
-                     height = gr.Slider(
-                         label="Height",
-                         minimum=512,
-                         maximum=MAX_IMAGE_SIZE,
-                         step=64,
-                         value=1024,
-                     )
-                 with gr.Row():
-                     guidance_scale = gr.Slider(
-                         label="Guidance Scale",
-                         minimum=0.1,
-                         maximum=6,
-                         step=0.1,
-                         value=3.0,
-                     )
-                     num_inference_steps = gr.Slider(
-                         label="Number of inference steps",
-                         minimum=1,
-                         maximum=25,
-                         step=1,
-                         value=23,
-                     )
-
-         with gr.Column(scale=1):
-             gr.Examples(
-                 examples=examples,
-                 inputs=prompt,
-                 cache_examples=False,
-             )

-     use_negative_prompt.change(
-         fn=lambda x: gr.update(visible=x),
-         inputs=use_negative_prompt,
-         outputs=negative_prompt,
-         api_name=False,
      )
-
      gr.on(
          triggers=[
              prompt.submit,
@@ -201,17 +295,16 @@ with gr.Blocks(css=css) as demo:
              prompt,
              negative_prompt,
              use_negative_prompt,
              seed,
              width,
              height,
              guidance_scale,
-             num_inference_steps,
              randomize_seed,
-             num_images
          ],
          outputs=[result, seed],
          api_name="run",
      )

  if __name__ == "__main__":
-     demo.queue(max_size=40).launch(ssr_mode=False)
 
@@ -2,52 +2,115 @@ import os
  import random
  import uuid
  import json
+
  import gradio as gr
  import numpy as np
  from PIL import Image
  import spaces
  import torch
+
+ from diffusers import DiffusionPipeline
  from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
+ from typing import Tuple

+ bad_words = json.loads(os.getenv('BAD_WORDS', "[]"))
+ bad_words_negative = json.loads(os.getenv('BAD_WORDS_NEGATIVE', "[]"))
+ default_negative = os.getenv("default_negative","")

+ def check_text(prompt, negative=""):
+     for i in bad_words:
+         if i in prompt:
+             return True
+     for i in bad_words_negative:
+         if i in negative:
+             return True
+     return False
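
A note on the filter introduced above: BAD_WORDS and BAD_WORDS_NEGATIVE are expected to hold JSON-encoded lists of strings; their real values live in the Space's environment and are not part of this commit. A minimal sketch with made-up values:

    # Illustrative only -- not part of the committed app.py; values are invented.
    import json, os
    os.environ["BAD_WORDS"] = '["blockedword"]'
    bad_words = json.loads(os.getenv("BAD_WORDS", "[]"))
    print(any(w in "a prompt containing blockedword" for w in bad_words))  # -> True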
+
+ style_list = [
+
+     {
+         "name": "Photo",
+         "prompt": "cinematic photo {prompt}. 35mm photograph, film, bokeh, professional, 4k, highly detailed",
+         "negative_prompt": "drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly",
+     },
+
+     {
+         "name": "Cinematic",
+         "prompt": "cinematic still {prompt}. emotional, harmonious, vignette, highly detailed, high budget, bokeh, cinemascope, moody, epic, gorgeous, film grain, grainy",
+         "negative_prompt": "anime, cartoon, graphic, text, painting, crayon, graphite, abstract, glitch, deformed, mutated, ugly, disfigured",
+     },
+
+     {
+         "name": "Anime",
+         "prompt": "anime artwork {prompt}. anime style, key visual, vibrant, studio anime, highly detailed",
+         "negative_prompt": "photo, deformed, black and white, realism, disfigured, low contrast",
+     },
+     {
+         "name": "3D Model",
+         "prompt": "professional 3d model {prompt}. octane render, highly detailed, volumetric, dramatic lighting",
+         "negative_prompt": "ugly, deformed, noisy, low poly, blurry, painting",
+     },
+     {
+         "name": "(No style)",
+         "prompt": "{prompt}",
+         "negative_prompt": "",
+     },
  ]

+ styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list}
+ STYLE_NAMES = list(styles.keys())
+ DEFAULT_STYLE_NAME = "Photo"
+
+ def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str, str]:
+     p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
+     if not negative:
+         negative = ""
+     return p.replace("{prompt}", positive), n + negative
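
For context, apply_style above only does template substitution: the user prompt replaces the "{prompt}" placeholder of the selected style, and the style's negative prompt is placed ahead of whatever negative prompt the user supplied. A minimal, self-contained sketch of that behaviour using the "Photo" entry:

    # Illustrative only -- mirrors the apply_style logic added in this commit.
    styles = {"Photo": ("cinematic photo {prompt}. 35mm photograph, film, bokeh, professional, 4k, highly detailed",
                        "drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly")}
    p, n = styles["Photo"]
    positive = p.replace("{prompt}", "a red bicycle")
    negative = n + "low quality"  # the user's negative prompt is appended after the style's
    print(positive)  # cinematic photo a red bicycle. 35mm photograph, film, bokeh, ...

Note that n + negative concatenates without a separator, so the combined negative prompt reads "...deformed, uglylow quality" unless the user string starts with a comma.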
69
+
70
+ DESCRIPTION = """## Text to Image
71
+
72
+ """
73
+
74
+ if not torch.cuda.is_available():
75
+ DESCRIPTION += "\n<p>⚠️Running on CPU, This may not work on CPU.</p>"
76
+
77
+ MAX_SEED = np.iinfo(np.int32).max
78
+ CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES", "0") == "1"
79
+ MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "2048"))
80
  USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
81
  ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
 
82
 
83
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
 
 
 
 
 
 
84
 
85
+ NUM_IMAGES_PER_PROMPT = 1
 
86
 
87
+ if torch.cuda.is_available():
88
+ pipe = StableDiffusionXLPipeline.from_pretrained(
89
+ "SG161222/RealVisXL_V5.0_Lightning",
90
+ torch_dtype=torch.float16,
91
+ use_safetensors=True,
92
+ add_watermarker=False,
93
+ variant="fp16"
94
+ )
95
+ pipe2 = StableDiffusionXLPipeline.from_pretrained(
96
+ "SG161222/RealVisXL_V4.0_Lightning",
97
+ torch_dtype=torch.float16,
98
+ use_safetensors=True,
99
+ add_watermarker=False,
100
+ variant="fp16"
101
+ )
102
+ if ENABLE_CPU_OFFLOAD:
103
+ pipe.enable_model_cpu_offload()
104
+ pipe2.enable_model_cpu_offload()
105
+ else:
106
+ pipe.to(device)
107
+ pipe2.to(device)
108
+ print("Loaded on Device!")
109
+
110
+ if USE_TORCH_COMPILE:
111
+ pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
112
+ pipe2.unet = torch.compile(pipe2.unet, mode="reduce-overhead", fullgraph=True)
113
+ print("Model Compiled!")

  def save_image(img):
      unique_name = str(uuid.uuid4()) + ".png"
@@ -59,137 +122,168 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
      seed = random.randint(0, MAX_SEED)
      return seed

+ @spaces.GPU(duration=30)
  def generate(
      prompt: str,
      negative_prompt: str = "",
      use_negative_prompt: bool = False,
+     style: str = DEFAULT_STYLE_NAME,
+     seed: int = 0,
      width: int = 1024,
      height: int = 1024,
      guidance_scale: float = 3,
      randomize_seed: bool = False,
      use_resolution_binning: bool = True,
      progress=gr.Progress(track_tqdm=True),
  ):
+     if check_text(prompt, negative_prompt):
+         raise ValueError("Prompt contains restricted words.")
+
+     prompt, negative_prompt = apply_style(style, prompt, negative_prompt)
      seed = int(randomize_seed_fn(seed, randomize_seed))
+     generator = torch.Generator().manual_seed(seed)
+
+     if not use_negative_prompt:
+         negative_prompt = ""  # type: ignore
+     negative_prompt += default_negative

      options = {
+         "prompt": prompt,
+         "negative_prompt": negative_prompt,
          "width": width,
          "height": height,
          "guidance_scale": guidance_scale,
+         "num_inference_steps": 25,
          "generator": generator,
+         "num_images_per_prompt": NUM_IMAGES_PER_PROMPT,
+         "use_resolution_binning": use_resolution_binning,
          "output_type": "pil",
      }
+
+     images = pipe(**options).images + pipe2(**options).images

      image_paths = [save_image(img) for img in images]
      return image_paths, seed

+ examples = [
+     "3d image, cute girl, in the style of Pixar --ar 1:2 --stylize 750, 4K resolution highlights, Sharp focus, octane render, ray tracing, Ultra-High-Definition, 8k, UHD, HDR, (Masterpiece:1.5), (best quality:1.5)",
+     "A glass cup of cold coffee placed on a rustic wooden table, surrounded by soft morning light. The coffee is rich, dark, and topped with a light layer of creamy froth, droplets of condensation sliding down the glass.",
+     "Vector illustration of a horse, vector graphic design with flat colors on an brown background in the style of vector art, using simple shapes and graphics with simple details, professionally designed as a tshirt logo ready for print on a white background. --ar 89:82 --v 6.0 --style raw",
+     "Man in brown leather jacket posing for camera, in the style of sleek and stylized, clockpunk, subtle shades, exacting precision, ferrania p30 --ar 67:101 --v 5",
+     "Commercial photography, giant burger, white lighting, studio light, 8k octane rendering, high resolution photography, insanely detailed, fine details, on white isolated plain, 8k, commercial photography, stock photo, professional color grading, --v 4 --ar 9:16 "
+ ]

+ css = '''
+ .gradio-container {
+   max-width: 640px !important;
+   margin: 0 auto !important;
+ }
+ h1 {
+   text-align: center;
+ }
+ footer {
+   visibility: hidden;
+ }
+ '''
+ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
+     gr.Markdown(DESCRIPTION)
+     gr.DuplicateButton(
+         value="Duplicate Space for private use",
+         elem_id="duplicate-button",
+         visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1",
+     )
+     with gr.Group():
+         with gr.Row():
+             prompt = gr.Text(
+                 label="Prompt",
+                 show_label=False,
+                 max_lines=1,
+                 placeholder="Enter your prompt",
+                 container=False,
+             )
+             run_button = gr.Button("Run")
+     result = gr.Gallery(label="Result", columns=1, preview=True)
+     with gr.Accordion("Advanced options", open=False):
+         use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True, visible=True)
+         negative_prompt = gr.Text(
+             label="Negative prompt",
+             max_lines=1,
+             placeholder="Enter a negative prompt",
+             value="(deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime:1.4), text, close up, cropped, out of frame, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck",
+             visible=True,
          )
+         with gr.Row():
+             num_inference_steps = gr.Slider(
+                 label="Steps",
+                 minimum=10,
+                 maximum=60,
+                 step=1,
+                 value=20,
+             )
+         with gr.Row():
+             num_images_per_prompt = gr.Slider(
+                 label="Images",
+                 minimum=1,
+                 maximum=10,
+                 step=1,
+                 value=6,
+             )
+         seed = gr.Slider(
+             label="Seed",
+             minimum=0,
+             maximum=MAX_SEED,
+             step=1,
+             value=0,
+             visible=True
+         )
+         randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+         with gr.Row(visible=True):
+             width = gr.Slider(
+                 label="Width",
+                 minimum=512,
+                 maximum=2048,
+                 step=8,
+                 value=1024,
+             )
+             height = gr.Slider(
+                 label="Height",
+                 minimum=512,
+                 maximum=2048,
+                 step=8,
+                 value=1024,
+             )
+         with gr.Row():
+             guidance_scale = gr.Slider(
+                 label="Guidance Scale",
+                 minimum=0.1,
+                 maximum=20.0,
+                 step=0.1,
+                 value=3.0,
+             )
+         with gr.Row(visible=True):
+             style_selection = gr.Radio(
+                 show_label=True,
+                 container=True,
+                 interactive=True,
+                 choices=STYLE_NAMES,
+                 value=DEFAULT_STYLE_NAME,
+                 label="Image Style",
+             )
+     gr.Examples(
+         examples=examples,
+         inputs=prompt,
+         outputs=[result, seed],
+         fn=generate,
+         cache_examples=CACHE_EXAMPLES,
+     )
+
+     use_negative_prompt.change(
+         fn=lambda x: gr.update(visible=x),
+         inputs=use_negative_prompt,
+         outputs=negative_prompt,
+         api_name=False,
+     )
+
      gr.on(
          triggers=[
              prompt.submit,
@@ -201,17 +295,16 @@ with gr.Blocks(css=css) as demo:
              prompt,
              negative_prompt,
              use_negative_prompt,
+             style_selection,
              seed,
              width,
              height,
              guidance_scale,
              randomize_seed,
          ],
          outputs=[result, seed],
          api_name="run",
      )

  if __name__ == "__main__":
+     demo.queue(max_size=20).launch(ssr_mode=True, show_error=True, share=True)
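
Since the generate handler is wired up with api_name="run", the deployed Space can also be driven programmatically. A minimal sketch with gradio_client (the Space id is a placeholder, the positional arguments follow the inputs list above, and the gallery return format depends on the Gradio version):

    # Illustrative only -- client-side call against the /run endpoint defined above.
    from gradio_client import Client

    client = Client("user/space-name")  # hypothetical Space id
    result, seed = client.predict(
        "A glass cup of cold coffee on a rustic wooden table",  # prompt
        "",       # negative_prompt
        False,    # use_negative_prompt
        "Photo",  # style_selection
        0,        # seed
        1024,     # width
        1024,     # height
        3.0,      # guidance_scale
        True,     # randomize_seed
        api_name="/run",
    )
    print(seed, result)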