import gradio as gr
import numpy as np
import random
import spaces
from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler
import torch
# Set device and model parameters
device = "cuda" if torch.cuda.is_available() else "cpu"
model_repo_id = "tensorart/stable-diffusion-3.5-large-TurboX"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
# Load the pipeline with the specified torch_dtype and move it to the GPU
pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
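# Swap the default scheduler for the flow-match Euler scheduler; shift=5 matches
# the few-step sampling setup recommended for the TurboX checkpoint.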
pipe.scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(
    model_repo_id, subfolder="scheduler", shift=5
)
pipe = pipe.to(device)
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024

def truncate_text(text, max_tokens=77):
    """
    Truncate the input text to a maximum of max_tokens using the pipeline's tokenizer.
    """
    if text.strip() == "":
        return text
    tokens = pipe.tokenizer(text, truncation=True, max_length=max_tokens, add_special_tokens=True)
    truncated_text = pipe.tokenizer.decode(tokens["input_ids"], skip_special_tokens=True)
    return truncated_text
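
# On ZeroGPU Spaces, @spaces.GPU requests GPU hardware for the decorated call;
# duration is the expected runtime budget in seconds for each invocation.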
@spaces.GPU(duration=65)
def infer(
    prompt,
    negative_prompt="",
    seed=42,
    randomize_seed=False,
    width=1024,
    height=1024,
    guidance_scale=1.5,
    num_inference_steps=8,
    progress=gr.Progress(track_tqdm=True),
):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device=device).manual_seed(seed)
    # Explicitly truncate prompts to avoid CLIP token warnings.
    prompt = truncate_text(prompt, max_tokens=77)
    negative_prompt = truncate_text(negative_prompt, max_tokens=77) if negative_prompt.strip() else ""
    # Generate the image using the truncated prompts.
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator,
    ).images[0]
    return image, seed

# UI Layout
examples = [
    "A capybara wearing a suit holding a sign that reads Hello World",
]

css = """
#col-container {
    margin: 0 auto;
    max-width: 640px;
}
"""

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("# TensorArt Stable Diffusion 3.5 Large TurboX")
        gr.Markdown("[8-step distilled turbo model](https://huggingface.co/tensorart/stable-diffusion-3.5-large-TurboX)")
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=0, variant="primary")
        result = gr.Image(label="Result", show_label=False)
        with gr.Accordion("Advanced Settings", open=False):
            negative_prompt = gr.Text(
                label="Negative prompt",
                max_lines=1,
                placeholder="Enter a negative prompt",
            )
            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
            with gr.Row():
                width = gr.Slider(
                    label="Width",
                    minimum=512,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,
                )
                height = gr.Slider(
                    label="Height",
                    minimum=512,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,
                )
            with gr.Row():
                guidance_scale = gr.Slider(
                    label="Guidance scale",
                    minimum=0.0,
                    maximum=7.5,
                    step=0.1,
                    value=1.5,
                )
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=8,
                )
        gr.Examples(examples=examples, inputs=[prompt], outputs=[result, seed], fn=infer, cache_examples=True, cache_mode="lazy")
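    # Trigger inference from both the Run button and pressing Enter in the prompt box.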
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[
            prompt,
            negative_prompt,
            seed,
            randomize_seed,
            width,
            height,
            guidance_scale,
            num_inference_steps,
        ],
        outputs=[result, seed],
    )

if __name__ == "__main__":
    demo.launch()