# flx8lora / app.py
import spaces
import argparse
import os
import time
from os import path
from safetensors.torch import load_file
from huggingface_hub import hf_hub_download
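# Keep all Hugging Face downloads in a local "models" folder next to this file.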
cache_path = path.join(path.dirname(path.abspath(__file__)), "models")
os.environ["TRANSFORMERS_CACHE"] = cache_path
os.environ["HF_HUB_CACHE"] = cache_path
os.environ["HF_HOME"] = cache_path
import gradio as gr
import torch
from diffusers import FluxPipeline
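# Allow TF32 matmuls for faster inference on Ampere and newer GPUs.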
torch.backends.cuda.matmul.allow_tf32 = True
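# Minimal context manager that logs how long the wrapped block takes.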
class timer:
    def __init__(self, method_name="timed process"):
        self.method = method_name

    def __enter__(self):
        self.start = time.time()
        print(f"{self.method} starts")

    def __exit__(self, exc_type, exc_val, exc_tb):
        end = time.time()
        print(f"{self.method} took {str(round(end - self.start, 2))}s")
if not path.exists(cache_path):
    os.makedirs(cache_path, exist_ok=True)
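# Load FLUX.1-dev and fuse ByteDance's Hyper-SD 8-step LoRA so images can be generated in ~8 inference steps.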
pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
pipe.load_lora_weights(hf_hub_download("ByteDance/Hyper-SD", "Hyper-FLUX.1-dev-8steps-lora.safetensors"))
pipe.fuse_lora(lora_scale=0.125)
pipe.to(device="cuda", dtype=torch.bfloat16)
# Define example prompts
example_prompts = [
    "A cyberpunk cityscape at night with neon lights reflecting in puddles, towering skyscrapers and flying cars",
    "An ethereal fairy with translucent iridescent wings standing in an enchanted forest with glowing mushrooms and floating light particles",
    "A majestic dragon soaring through stormy clouds above jagged mountain peaks as lightning strikes in the background",
    "A futuristic space station orbiting a vibrant nebula with multiple colorful ringed planets visible through a massive observation window",
    "An underwater scene of an ancient lost city with ornate temples and statues covered in bioluminescent coral and swimming sea creatures"
]
# Custom CSS for neon theme
css = """
.neon-container {
background: linear-gradient(to right, #000428, #004e92);
border-radius: 16px;
box-shadow: 0 0 15px #00f3ff, 0 0 25px #00f3ff;
}
.neon-title {
text-shadow: 0 0 5px #fff, 0 0 10px #fff, 0 0 15px #0073e6, 0 0 20px #0073e6, 0 0 25px #0073e6;
color: #ffffff;
font-weight: bold !important;
}
.neon-text {
color: #00ff9d;
text-shadow: 0 0 5px #00ff9d;
}
.neon-button {
box-shadow: 0 0 5px #ff00dd, 0 0 10px #ff00dd !important;
background: linear-gradient(90deg, #ff00dd, #8b00ff) !important;
border: none !important;
color: white !important;
font-weight: bold !important;
}
.neon-button:hover {
box-shadow: 0 0 10px #ff00dd, 0 0 20px #ff00dd !important;
}
.neon-input {
border: 1px solid #00f3ff !important;
box-shadow: 0 0 5px #00f3ff !important;
}
.neon-slider > div {
background: linear-gradient(90deg, #00ff9d, #00f3ff) !important;
}
.neon-slider > div > div {
background: #ff00dd !important;
box-shadow: 0 0 5px #ff00dd !important;
}
.neon-card {
background-color: rgba(0, 0, 0, 0.7) !important;
border: 1px solid #00f3ff !important;
box-shadow: 0 0 10px #00f3ff !important;
padding: 16px !important;
border-radius: 8px !important;
}
.neon-example {
background: rgba(0, 0, 0, 0.5) !important;
border: 1px solid #00ff9d !important;
border-radius: 8px !important;
padding: 8px !important;
color: #00ff9d !important;
box-shadow: 0 0 5px #00ff9d !important;
margin: 4px !important;
cursor: pointer !important;
}
.neon-example:hover {
box-shadow: 0 0 10px #00ff9d, 0 0 15px #00ff9d !important;
background: rgba(0, 255, 157, 0.2) !important;
}
"""
with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
    with gr.Group(elem_classes=["neon-container"]):
        gr.Markdown(
            """
<div style="text-align: center; max-width: 650px; margin: 0 auto;">
<h1 style="font-size: 3rem; font-weight: 700; margin-bottom: 1rem; display: contents;" class="neon-title">FLUX: Fast & Furious</h1>
<p style="font-size: 1.2rem; margin-bottom: 1.5rem;" class="neon-text">AutoML team from ByteDance</p>
</div>
            """
        )
    with gr.Row():
        with gr.Column(scale=3, elem_classes=["neon-card"]):
            with gr.Group():
                prompt = gr.Textbox(
                    label="Your Image Description",
                    placeholder="E.g., A serene landscape with mountains and a lake at sunset",
                    lines=3,
                    elem_classes=["neon-input"]
                )

            # Examples section
            gr.Markdown('<p class="neon-text">Click on any example to use it:</p>')
            with gr.Row():
                example_boxes = [gr.Button(ex[:40] + "...", elem_classes=["neon-example"]) for ex in example_prompts]

            # Connect example buttons to the prompt textbox
            for i, example_btn in enumerate(example_boxes):
                example_btn.click(
                    fn=lambda x=example_prompts[i]: x,
                    outputs=prompt
                )
            with gr.Accordion("Advanced Settings", open=False):
                with gr.Group():
                    with gr.Row():
                        height = gr.Slider(label="Height", minimum=256, maximum=1152, step=64, value=1024,
                                           elem_classes=["neon-slider"])
                        width = gr.Slider(label="Width", minimum=256, maximum=1152, step=64, value=1024,
                                          elem_classes=["neon-slider"])
                    with gr.Row():
                        steps = gr.Slider(label="Inference Steps", minimum=6, maximum=25, step=1, value=8,
                                          elem_classes=["neon-slider"])
                        scales = gr.Slider(label="Guidance Scale", minimum=0.0, maximum=5.0, step=0.1, value=3.5,
                                           elem_classes=["neon-slider"])
                    seed = gr.Number(label="Seed (for reproducibility)", value=3413, precision=0,
                                     elem_classes=["neon-input"])

            generate_btn = gr.Button("Generate Image", variant="primary", scale=1, elem_classes=["neon-button"])
        with gr.Column(scale=4, elem_classes=["neon-card"]):
            output = gr.Image(label="Your Generated Image")
    gr.Markdown(
        """
<div style="max-width: 650px; margin: 2rem auto; padding: 1rem; border-radius: 10px;" class="neon-card">
<h2 style="font-size: 1.5rem; margin-bottom: 1rem;" class="neon-text">How to Use</h2>
<ol style="padding-left: 1.5rem; color: #00f3ff;">
<li>Enter a detailed description of the image you want to create.</li>
<li>Or click one of our exciting example prompts above!</li>
<li>Adjust advanced settings if desired (tap to expand).</li>
<li>Tap "Generate Image" and wait for your creation!</li>
</ol>
<p style="margin-top: 1rem; font-style: italic; color: #ff00dd;">Tip: Be specific in your description for best results!</p>
</div>
        """
    )
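    # Inference runs on the Space's GPU; @spaces.GPU requests GPU time for each call.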
    @spaces.GPU
    def process_image(height, width, steps, scales, prompt, seed):
        global pipe
        with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16), timer("inference"):
            return pipe(
                prompt=[prompt],
                generator=torch.Generator().manual_seed(int(seed)),
                num_inference_steps=int(steps),
                guidance_scale=float(scales),
                height=int(height),
                width=int(width),
                max_sequence_length=256
            ).images[0]
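    # Wire the generate button to the inference function.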
    generate_btn.click(
        process_image,
        inputs=[height, width, steps, scales, prompt, seed],
        outputs=output
    )
if __name__ == "__main__":
    demo.launch()