import gradio as gr
import torch
from diffusers import DiffusionPipeline, StableDiffusion3Pipeline

device = 'cuda' if torch.cuda.is_available() else 'cpu'
if torch.cuda.is_available():
    torch.cuda.empty_cache()

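# genie() loads the selected pipeline on demand, runs inference, and returns a single PIL image.
# "SD3.5" is a one-pass text-to-image pipeline; "FXL" first renders latents with the base model
# and then hands them to the SDXL refiner for the final slice of denoising.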
def genie(Model, Prompt, negative_prompt, height, width, scale, steps, seed, progress=gr.Progress(track_tqdm=True), max_sequence_length=512):
    # A seed of 0 means "randomize"; otherwise the supplied seed is used deterministically.
    if seed == 0:
        seed = torch.randint(0, 2**32 - 1, (1,)).item()
    generator = torch.Generator(device=device).manual_seed(int(seed))
    if Model == "SD3.5":
        #torch.cuda.max_memory_allocated(device=device)
        torch.cuda.empty_cache()
        SD3 = StableDiffusion3Pipeline.from_pretrained("stabilityai/stable-diffusion-3.5-medium", torch_dtype=torch.float16).to(device)
        torch.cuda.empty_cache()
        progress=gr.Progress(track_tqdm=True)
        image=SD3(
        prompt=Prompt,
        height=height,
        width=width,
        negative_prompt=negative_prompt,
        guidance_scale=scale,
        num_images_per_prompt=1,
        num_inference_steps=steps, max_sequence_length=512).images[0]
    if Model == "FXL":

        torch.cuda.empty_cache()
        #torch.cuda.max_memory_allocated(device=device)
        progress=gr.Progress(track_tqdm=True)
        pipe = DiffusionPipeline.from_pretrained("circulus/canvers-fusionXL-v1", torch_dtype=torch.float32)
        pipe.enable_xformers_memory_efficient_attention()
        pipe = pipe.to(device)
        torch.cuda.empty_cache()

        #torch.cuda.max_memory_allocated(device=device)
        int_image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, max_sequence_length=512, output_type="latent").images
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16") if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
        pipe.enable_xformers_memory_efficient_attention()
        pipe = pipe.to(device)
        torch.cuda.empty_cache()
        image = pipe(Prompt, negative_prompt=negative_prompt, image=int_image, max_sequence_length=512, denoising_start=.99).images[0]
        torch.cuda.empty_cache()
        
    return image
    
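# The Interface inputs below are passed positionally to genie(), in order:
# model choice, prompt, negative prompt, height, width, guidance scale, steps, seed.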
gr.Interface(fn=genie, inputs=[gr.Radio(["SD3.5", "FXL"], value='SD3.5', label='Choose Model'),
                               gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'), 
                               gr.Textbox(label='What you Do Not want the AI to generate. 77 Token Limit'),
                               gr.Slider(512, 1536, 1024, step=128, label='Height'),
                               gr.Slider(512, 1536, 1024, step=128, label='Width'),
                               gr.Slider(.5, maximum=15, value=7, step=.25, label='Guidance Scale'), 
                               gr.Slider(10, maximum=50, value=25, step=5, label='Number of Inference Steps'),
                               gr.Slider(minimum=0, step=1, maximum=9999999999999999, randomize=True, label='Seed: 0 is Random')],
             outputs=gr.Image(label='Generated Image'), 
             title="Manju Dream Booth V2.5 with Stable Diffusion 3.5 & Fusion XL - GPU", 
             description="<br><br><b/>Warning: This Demo is capable of producing NSFW content.", 
             article = "If You Enjoyed this Demo and would like to Donate, you can send any amount to any of these Wallets. <br><br>SHIB (BEP20): 0xbE8f2f3B71DFEB84E5F7E3aae1909d60658aB891 <br>PayPal: https://www.paypal.me/ManjushriBodhisattva <br>ETH: 0xbE8f2f3B71DFEB84E5F7E3aae1909d60658aB891 <br>DOGE: D9QdVPtcU1EFH8jDC8jhU9uBcSTqUiA8h6<br><br>Code Monkey: <a href=\"https://huggingface.co/Manjushri\">Manjushri</a>").launch(debug=True)