File size: 8,569 Bytes
176edce
 
 
 
 
 
 
ac3894a
176edce
 
 
 
ac3894a
95db5c0
 
 
343fdaf
95db5c0
8d2510b
176edce
 
 
 
 
 
 
 
 
343fdaf
95db5c0
 
343fdaf
95db5c0
 
 
 
 
 
 
 
 
 
 
 
 
8d2510b
95db5c0
de7fb8a
95db5c0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
de7fb8a
8d2510b
0e7941e
95db5c0
 
 
 
 
 
 
 
 
66fcae2
95db5c0
 
 
 
 
 
 
 
0e7941e
95db5c0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
47297cd
3ec2621
ba3c0ae
02fd843
95db5c0
 
 
 
 
 
 
 
 
 
18f2392
3ec2621
95db5c0
3ec2621
95db5c0
18f2392
343fdaf
176edce
95db5c0
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
# Hugging Face Space entry point: a FLUX.1-dev text-to-image demo accelerated
# with ByteDance's Hyper-SD 8-step LoRA, served through a Gradio UI.
import spaces
import argparse
import os
import time
from os import path
from safetensors.torch import load_file
from huggingface_hub import hf_hub_download

# Redirect all Hugging Face caches into a local "models" directory next to
# this file. These env vars are set BEFORE the ML libraries are imported
# below so they pick up this cache location instead of the defaults.
cache_path = path.join(path.dirname(path.abspath(__file__)), "models")
os.environ["TRANSFORMERS_CACHE"] = cache_path
os.environ["HF_HUB_CACHE"] = cache_path
os.environ["HF_HOME"] = cache_path

import gradio as gr
import torch
from diffusers import FluxPipeline

# Allow TF32 matmuls (Ampere+ GPUs) for faster inference at a negligible
# precision cost.
torch.backends.cuda.matmul.allow_tf32 = True

class timer:
    """Context manager that prints the wall-clock duration of a code block.

    Usage:
        with timer("inference"):
            ...  # timed work

    Prints "<name> starts" on entry and "<name> took <seconds>s" on exit.
    """

    def __init__(self, method_name="timed process"):
        # Label used in the start/end log lines.
        self.method = method_name

    def __enter__(self):
        # perf_counter is monotonic and higher-resolution than time.time(),
        # so short durations are measured reliably (fix: time.time can jump
        # with system clock adjustments).
        self.start = time.perf_counter()
        print(f"{self.method} starts")
        # Fix: return self so `with timer(...) as t:` works (original
        # implicitly returned None).
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        elapsed = time.perf_counter() - self.start
        print(f"{self.method} took {round(elapsed, 2)}s")
        # Implicit None return: any exception raised in the block propagates.

# Ensure the local model cache directory exists before any downloads start.
if not path.exists(cache_path):
    os.makedirs(cache_path, exist_ok=True)

# Load FLUX.1-dev in bfloat16, merge the Hyper-SD 8-step distillation LoRA
# into the base weights, and move the whole pipeline to the GPU.
# NOTE(review): lora_scale=0.125 presumably follows the LoRA authors'
# recommended fuse scale for the 8-step variant — confirm against the
# ByteDance/Hyper-SD model card.
pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
pipe.load_lora_weights(hf_hub_download("ByteDance/Hyper-SD", "Hyper-FLUX.1-dev-8steps-lora.safetensors"))
pipe.fuse_lora(lora_scale=0.125)
pipe.to(device="cuda", dtype=torch.bfloat16)

# Example prompts surfaced as clickable buttons under the input box; clicking
# one copies the full text into the prompt textbox (buttons show only the
# first 40 characters of each prompt).
example_prompts = [
    "A cyberpunk cityscape at night with neon lights reflecting in puddles, towering skyscrapers and flying cars",
    "An ethereal fairy with translucent iridescent wings standing in an enchanted forest with glowing mushrooms and floating light particles",
    "A majestic dragon soaring through stormy clouds above jagged mountain peaks as lightning strikes in the background",
    "A futuristic space station orbiting a vibrant nebula with multiple colorful ringed planets visible through a massive observation window",
    "An underwater scene of an ancient lost city with ornate temples and statues covered in bioluminescent coral and swimming sea creatures"
]

# Custom CSS for the neon theme. Each class below is attached to Gradio
# components via elem_classes=... in the UI definition (neon-container,
# neon-title, neon-text, neon-button, neon-input, neon-slider, neon-card,
# neon-example).
css = """
    .neon-container {
        background: linear-gradient(to right, #000428, #004e92);
        border-radius: 16px;
        box-shadow: 0 0 15px #00f3ff, 0 0 25px #00f3ff;
    }
    .neon-title {
        text-shadow: 0 0 5px #fff, 0 0 10px #fff, 0 0 15px #0073e6, 0 0 20px #0073e6, 0 0 25px #0073e6;
        color: #ffffff;
        font-weight: bold !important;
    }
    .neon-text {
        color: #00ff9d;
        text-shadow: 0 0 5px #00ff9d;
    }
    .neon-button {
        box-shadow: 0 0 5px #ff00dd, 0 0 10px #ff00dd !important;
        background: linear-gradient(90deg, #ff00dd, #8b00ff) !important;
        border: none !important;
        color: white !important;
        font-weight: bold !important;
    }
    .neon-button:hover {
        box-shadow: 0 0 10px #ff00dd, 0 0 20px #ff00dd !important;
    }
    .neon-input {
        border: 1px solid #00f3ff !important;
        box-shadow: 0 0 5px #00f3ff !important;
    }
    .neon-slider > div {
        background: linear-gradient(90deg, #00ff9d, #00f3ff) !important;
    }
    .neon-slider > div > div {
        background: #ff00dd !important;
        box-shadow: 0 0 5px #ff00dd !important;
    }
    .neon-card {
        background-color: rgba(0, 0, 0, 0.7) !important;
        border: 1px solid #00f3ff !important;
        box-shadow: 0 0 10px #00f3ff !important;
        padding: 16px !important;
        border-radius: 8px !important;
    }
    .neon-example {
        background: rgba(0, 0, 0, 0.5) !important;
        border: 1px solid #00ff9d !important;
        border-radius: 8px !important;
        padding: 8px !important;
        color: #00ff9d !important;
        box-shadow: 0 0 5px #00ff9d !important;
        margin: 4px !important;
        cursor: pointer !important;
    }
    .neon-example:hover {
        box-shadow: 0 0 10px #00ff9d, 0 0 15px #00ff9d !important;
        background: rgba(0, 255, 157, 0.2) !important;
    }
"""

with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
    # FIX: the original opened a second `with gr.Blocks(...)` inside `demo`.
    # gr.Blocks is an application root and is not designed to be nested
    # inside another Blocks context; `gr.Column` provides the intended
    # styled grouping (elem_classes still applies) without creating a
    # second app context.
    with gr.Column(elem_classes=["neon-container"]):
        # Page header.
        gr.Markdown(
            """
            <div style="text-align: center; max-width: 650px; margin: 0 auto;">
                <h1 style="font-size: 3rem; font-weight: 700; margin-bottom: 1rem; display: contents;" class="neon-title">FLUX: Fast & Furious</h1>
                <p style="font-size: 1.2rem; margin-bottom: 1.5rem;" class="neon-text">AutoML team from ByteDance</p>
            </div>
            """
        )

        with gr.Row():
            # Left column: prompt input, clickable examples, and settings.
            with gr.Column(scale=3, elem_classes=["neon-card"]):
                with gr.Group():
                    prompt = gr.Textbox(
                        label="Your Image Description",
                        placeholder="E.g., A serene landscape with mountains and a lake at sunset",
                        lines=3,
                        elem_classes=["neon-input"]
                    )

                    # Examples section: one button per example prompt,
                    # labeled with the first 40 characters of the text.
                    gr.Markdown('<p class="neon-text">Click on any example to use it:</p>')
                    with gr.Row():
                        example_boxes = [gr.Button(ex[:40] + "...", elem_classes=["neon-example"]) for ex in example_prompts]

                    # Clicking a button copies its full prompt text into the
                    # textbox. The default argument binds each prompt at
                    # lambda-creation time, so every button returns its own
                    # text (avoids the late-binding closure pitfall).
                    for example_text, example_btn in zip(example_prompts, example_boxes):
                        example_btn.click(
                            fn=lambda text=example_text: text,
                            outputs=prompt
                        )

                    with gr.Accordion("Advanced Settings", open=False):
                        with gr.Group():
                            with gr.Row():
                                # 64-px steps match common diffusion latent
                                # size constraints.
                                height = gr.Slider(label="Height", minimum=256, maximum=1152, step=64, value=1024, 
                                                elem_classes=["neon-slider"])
                                width = gr.Slider(label="Width", minimum=256, maximum=1152, step=64, value=1024,
                                                elem_classes=["neon-slider"])
                            
                            with gr.Row():
                                # Default of 8 steps matches the fused
                                # Hyper-SD 8-step LoRA.
                                steps = gr.Slider(label="Inference Steps", minimum=6, maximum=25, step=1, value=8,
                                                elem_classes=["neon-slider"])
                                scales = gr.Slider(label="Guidance Scale", minimum=0.0, maximum=5.0, step=0.1, value=3.5,
                                                elem_classes=["neon-slider"])
                            
                            seed = gr.Number(label="Seed (for reproducibility)", value=3413, precision=0,
                                            elem_classes=["neon-input"])
                    
                    generate_btn = gr.Button("Generate Image", variant="primary", scale=1, elem_classes=["neon-button"])

            # Right column: the generated image output.
            with gr.Column(scale=4, elem_classes=["neon-card"]):
                output = gr.Image(label="Your Generated Image")
        
        # Usage instructions rendered beneath the main UI.
        gr.Markdown(
            """
            <div style="max-width: 650px; margin: 2rem auto; padding: 1rem; border-radius: 10px;" class="neon-card">
                <h2 style="font-size: 1.5rem; margin-bottom: 1rem;" class="neon-text">How to Use</h2>
                <ol style="padding-left: 1.5rem; color: #00f3ff;">
                    <li>Enter a detailed description of the image you want to create.</li>
                    <li>Or click one of our exciting example prompts above!</li>
                    <li>Adjust advanced settings if desired (tap to expand).</li>
                    <li>Tap "Generate Image" and wait for your creation!</li>
                </ol>
                <p style="margin-top: 1rem; font-style: italic; color: #ff00dd;">Tip: Be specific in your description for best results!</p>
            </div>
            """
        )

    @spaces.GPU
    def process_image(height, width, steps, scales, prompt, seed):
        """Run the FLUX pipeline once and return the generated image.

        Args:
            height: Output height in pixels (UI slider supplies multiples of 64).
            width: Output width in pixels.
            steps: Number of denoising steps.
            scales: Classifier-free guidance scale.
            prompt: Text description of the desired image.
            seed: RNG seed; identical seeds with identical settings reproduce
                the same image.

        Returns:
            The first (only) image from the pipeline's output batch.
        """
        # Fix: dropped the original `global pipe` declaration — the function
        # only reads the module-level `pipe` and never rebinds it, so the
        # statement was dead weight.
        with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16), timer("inference"):
            return pipe(
                prompt=[prompt],
                # CPU generator seeded per call for reproducibility.
                generator=torch.Generator().manual_seed(int(seed)),
                num_inference_steps=int(steps),
                guidance_scale=float(scales),
                height=int(height),
                width=int(width),
                # Prompt token cap kept from the original (speed/VRAM
                # trade-off versus the model's maximum).
                max_sequence_length=256
            ).images[0]

    # Wire the generate button: current slider/textbox values are passed to
    # process_image and the returned image is shown in the output component.
    generate_btn.click(
        process_image,
        inputs=[height, width, steps, scales, prompt, seed],
        outputs=output
    )

# Standard script entry point: start the Gradio server when run directly.
if __name__ == "__main__":
    demo.launch()