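# Gradio app: turn an uploaded photo into a Ghibli-style anime portrait with a diffusers img2img pipeline.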
import os
import tempfile
from typing import List

import gradio as gr
import torch
from diffusers import AutoPipelineForImage2Image
from PIL import Image

# Run on the GPU when available; CPU works but is very slow for diffusion models.
device = "cuda" if torch.cuda.is_available() else "cpu"
model_id = "black-forest-labs/FLUX.1-dev"

# AutoPipelineForImage2Image resolves the right img2img pipeline class for the checkpoint.
# The safety_checker argument is specific to Stable Diffusion pipelines, so it is omitted here.
pipe = AutoPipelineForImage2Image.from_pretrained(
    model_id,
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
).to(device)

# Trade some speed for lower peak VRAM; guarded because not every pipeline supports it.
if hasattr(pipe, "enable_attention_slicing"):
    pipe.enable_attention_slicing()

styles = {
    "Classic Ghibli": "ghibli style portrait",
    "Spirited Forest": "studio ghibli mystical forest portrait, soft lighting",
    "Windy Valley": "ghibli style sky valley portrait, dreamy atmosphere",
    "Cozy Home": "ghibli style cozy cottage scene, warm tones"
}

def generate_final_image(
    image: Image.Image,
    style_choice: str,
    steps: int,
    history: List[Image.Image],
    progress: gr.Progress = gr.Progress(track_tqdm=True)
) -> tuple:
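    """Apply the selected Ghibli style to the uploaded photo and append the result to the history."""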
    prompt = styles.get(style_choice, "ghibli style portrait")
    
    with torch.inference_mode():
        output = pipe(
            prompt=prompt,
            image=image,
            strength=0.6,
            guidance_scale=6.5,
            num_inference_steps=steps
        )
    
    final_image = output.images[0]
    return final_image, history + [final_image]

# Gradio UI: photo upload, style selection, generation, history gallery, and download.
with gr.Blocks(theme=gr.themes.Soft()) as iface:
    gr.Markdown("## 🌸 **Ghibli Portrait Generator — Spicy Edition** 🌸")
    gr.Markdown("Upload a photo and transform it into an anime scene in your favorite Ghibli style! 🎬✨")
    
    with gr.Row():
        with gr.Column(scale=1):
            image_input = gr.Image(type="pil", label="📸 Upload your photo", height=300)
            style_dropdown = gr.Dropdown(
                list(styles.keys()),
                label="🎨 Choose a Ghibli Style",
                value="Classic Ghibli"
            )
            steps_slider = gr.Slider(10, 100, value=42, step=1, label="✨ Inference Steps")
            generate_btn = gr.Button("💫 Start Magic!", variant="primary")
        
        with gr.Column(scale=2):
            output_image = gr.Image(label="🌟 Final Ghibli Portrait", height=500)
            with gr.Row():
                download_btn = gr.Button("💾 Download Final Image")
                clear_btn = gr.Button("🧹 Clear History")
            # File output for the download handler; gr.File serves a saved PNG to the browser.
            download_file = gr.File(label="⬇️ Your Ghibli Portrait")
    
    with gr.Accordion("📜 Previous Generations", open=False):
        history_gallery = gr.Gallery(
            label="Your Ghibli Journey",
            columns=4,
            height="auto",
            object_fit="contain"
        )
    
    history_state = gr.State([])

    # Generation workflow
    generate_btn.click(
        generate_final_image,
        [image_input, style_dropdown, steps_slider, history_state],
        [output_image, history_state]
    ).then(
        lambda x: x[-4:],  # Show only the four most recent generations in the gallery
        history_state,
        history_gallery
    )
    
    # Image selection from history: clicking a thumbnail reloads that image as the new input
    def select_from_history(history: List[Image.Image], evt: gr.SelectData) -> Image.Image:
        # The gallery only displays the last four generations, so index into that slice
        return history[-4:][evt.index]

    history_gallery.select(
        select_from_history,
        history_state,
        image_input
    )
    
    # Download handler: gr.File expects a filepath, so save the current portrait to a temporary PNG
    def export_image(img: Image.Image):
        if img is None:
            return None
        path = os.path.join(tempfile.gettempdir(), "ghibli_portrait.png")
        img.save(path)
        return path

    download_btn.click(export_image, output_image, download_file)
    
    # Clear history
    clear_btn.click(
        lambda: [],
        None,
        history_state
    ).then(
        lambda: None,
        None,
        history_gallery
    )

iface.launch(share=True, debug=True)