import spaces  # ZeroGPU helper; keep this import at the top, before torch

import os
import random

import gradio as gr
import torch
from PIL import Image
from diffusers import DiffusionPipeline
from huggingface_hub import login

# Authenticate with the Hugging Face Hub; FLUX.1-dev is a gated model, so the
# Space needs an HF_TOKEN secret with read access.
login(token=os.getenv("HF_TOKEN"))

# Favor reproducible cuDNN behavior while still allowing TF32 matmuls for speed.
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.backends.cuda.matmul.allow_tf32 = True

# Initialize the base model and specific LoRA
base_model = "black-forest-labs/FLUX.1-dev"
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.bfloat16)

lora_repo = "metek7/king23_james"
trigger_word = ""  # Leave trigger_word blank if not used.
pipe.load_lora_weights(lora_repo)
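# Note: the LoRA strength is not fixed here; it is passed at inference time via
# joint_attention_kwargs={"scale": lora_scale} in run_lora() below.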

# Move the pipeline to the GPU; with @spaces.GPU (ZeroGPU), the device is only
# attached while run_lora() is executing.
pipe.to("cuda")

MAX_SEED = 2**32-1
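# Upper bound for randomly generated seeds (also the maximum of the seed slider).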

@spaces.GPU()
def run_lora(prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
    # Set random seed for reproducibility
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device="cuda").manual_seed(seed)

    # Update the progress bar (0% at start)
    progress(0, "Starting image generation...")

    # Simulated progress updates (in a real scenario, integrate this with the
    # image generation process; track_tqdm already reports the denoising loop).
    update_every = max(steps // 10, 1)  # avoid ZeroDivisionError when steps < 10
    for i in range(1, steps + 1):
        if i % update_every == 0:  # update roughly every 10% of the steps
            progress(i / steps, f"Processing step {i} of {steps}...")

    # Generate image using the pipeline
    image = pipe(
        prompt=f"{prompt} {trigger_word}",
        num_inference_steps=steps,
        guidance_scale=cfg_scale,
        width=width,
        height=height,
        generator=generator,
        joint_attention_kwargs={"scale": lora_scale},
    ).images[0]

    # Final update (100%)
    progress(1, "Completed!")

    yield image, seed

# Example cached image and settings
example_image_path = "nIGIS9bwO2wP9CUdc2YYZ.webp"  # Replace with the actual path to the example image
example_prompt = """A Jelita Sukawati speaker is captured mid-speech. She has long, dark brown hair that cascades over her shoulders, framing her radiant, smiling face. Her Latina features are highlighted by warm, sun-kissed skin and bright, expressive eyes. She gestures with her left hand, displaying a delicate ring on her pinky finger, as she speaks passionately.
The woman is wearing a colorful, patterned dress with a green lanyard featuring multiple badges and logos hanging around her neck. The lanyard prominently displays the "CagliostroLab" text.
Behind her, there is a blurred background with a white banner containing logos and text, indicating a professional or conference setting. The overall scene captures the energy and vibrancy of her presentation."""
example_cfg_scale = 3.2
example_steps = 32
example_width = 1152
example_height = 896
example_seed = 3981632454
example_lora_scale = 0.85
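# These example settings are pushed into the UI controls by load_example() when
# the app starts (see app.load below).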

def load_example():
    # Load example image from file
    example_image = Image.open(example_image_path)
    return example_prompt, example_cfg_scale, example_steps, True, example_seed, example_width, example_height, example_lora_scale, example_image

with gr.Blocks() as app:
    gr.Markdown("# Flux RealismLora Image Generator")
    with gr.Row():
        with gr.Column(scale=3):
            prompt = gr.TextArea(label="Prompt", placeholder="Type a prompt", lines=5)
            generate_button = gr.Button("Generate")
            cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=example_cfg_scale)
            steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=example_steps)
            width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=example_width)
            height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=example_height)
            randomize_seed = gr.Checkbox(True, label="Randomize seed")
            seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=example_seed)
            lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=1, step=0.01, value=example_lora_scale)
        with gr.Column(scale=1):
            result = gr.Image(label="Generated Image")
            gr.Markdown("Generate images using RealismLora and a text prompt.\n[[non-commercial license, Flux.1 Dev](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md)]")

    # Automatically load example data and image when the interface is launched
    app.load(load_example, inputs=[], outputs=[prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale, result])
    
    generate_button.click(
        run_lora,
        inputs=[prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale],
        outputs=[result, seed]
    )
 
app.queue()
app.launch()