# NOTE(review): the lines below are non-Python residue captured from the
# Hugging Face Spaces file viewer (UI status text, commit hashes, and the
# line-number gutter). They made the file unparseable and are preserved
# here as a comment rather than deleted.
# Spaces: Running on T4 | Running on T4 | File size: 4,698 Bytes
# Commits: d8fcee4 d767ca6 d8fcee4 99abb5b 8ec67ee d8fcee4 41ed172 bc8b335
#   fc70d57 8b28b93 3b68a53 41ed172 39c5f37 41ed172 9534132 41ed172 39c5f37
#   41ed172 9534132 41ed172 6eb17f7 7d80404 6eb17f7 9534132 6eb17f7 9534132
#   6eb17f7 1dab3e0 d767ca6 1dab3e0 8b28b93 41ed172 8b28b93 41ed172 d767ca6
#   f606112 d767ca6 6eb17f7 d767ca6 f606112
import gradio as gr
import torch
import numpy as np
import modin.pandas as pd
from PIL import Image
from diffusers import DiffusionPipeline, StableDiffusion3Pipeline
from huggingface_hub import hf_hub_download
# Pick the compute device once at import time; everything below moves
# models to this device.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# CUDA bookkeeping must be guarded: torch.cuda.max_memory_allocated()
# raises on a CPU-only host (the original called it unconditionally).
if torch.cuda.is_available():
    torch.cuda.max_memory_allocated(device=device)
    torch.cuda.empty_cache()
def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, progress=gr.Progress(track_tqdm=True), max_sequence_length=512):
    """Generate one image with the selected diffusion model.

    Parameters
    ----------
    Model : str
        One of 'PhotoReal', 'Animagine XL 4', 'FXL' (set by the Radio input).
    Prompt, negative_prompt : str
        Positive / negative text conditioning.
    height, width : int
        Output resolution in pixels.
    scale : float
        Classifier-free guidance scale.
    steps : int
        Number of denoising iterations.
    seed : int
        0 means "draw a fresh random seed"; any other value makes the run
        reproducible.
    progress : gr.Progress
        Gradio progress bar, wired to tqdm automatically.
    max_sequence_length : int
        Kept for backward compatibility. Only SD3-family pipelines accept
        this argument, so it is NOT forwarded to the SD/SDXL pipelines used
        here (forwarding it raised TypeError in the original).

    Returns
    -------
    PIL.Image.Image
        The generated image.

    Raises
    ------
    ValueError
        If Model is not one of the three known names (the original fell
        through to an unbound `image` and raised NameError instead).
    """
    # Build an explicit torch.Generator and pass it to every pipeline call.
    # The original `np.random.seed(0)` returned None and had no effect on
    # torch sampling, and the generator was never forwarded to the pipes.
    generator = None if seed == 0 else torch.Generator(device).manual_seed(seed)
    if Model == "PhotoReal":
        torch.cuda.empty_cache()
        # fp16 + disabled safety checker on GPU; full-precision fallback on CPU.
        pipe = DiffusionPipeline.from_pretrained("circulus/canvers-real-v3.9.1", torch_dtype=torch.float16, safety_checker=None) if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("circulus/canvers-real-v3.9.1")
        pipe.enable_xformers_memory_efficient_attention()
        pipe = pipe.to(device)
        torch.cuda.empty_cache()
        image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, generator=generator).images[0]
        torch.cuda.empty_cache()
        return image
    if Model == "Animagine XL 4":
        torch.cuda.empty_cache()
        animagine = DiffusionPipeline.from_pretrained("cagliostrolab/animagine-xl-4.0", torch_dtype=torch.float16, safety_checker=None) if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("cagliostrolab/animagine-xl-4.0")
        animagine.enable_xformers_memory_efficient_attention()
        animagine = animagine.to(device)
        torch.cuda.empty_cache()
        image = animagine(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, generator=generator).images[0]
        torch.cuda.empty_cache()
        return image
    if Model == "FXL":
        torch.cuda.empty_cache()
        # Base pass in fp32 (as in the original), producing latents that are
        # then refined by the SDXL refiner below.
        pipe = DiffusionPipeline.from_pretrained("circulus/canvers-fusionXL-v1", torch_dtype=torch.float32)
        pipe.enable_xformers_memory_efficient_attention()
        pipe = pipe.to(device)
        torch.cuda.empty_cache()
        int_image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, generator=generator, output_type="latent").images
        # Refiner pass: denoising_start=.99 means the refiner only polishes
        # the final ~1% of the schedule on top of the base latents.
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16") if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
        pipe.enable_xformers_memory_efficient_attention()
        pipe = pipe.to(device)
        torch.cuda.empty_cache()
        image = pipe(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=.99, generator=generator).images[0]
        torch.cuda.empty_cache()
        return image
    raise ValueError(f"Unknown model: {Model!r}")
# Build and launch the Gradio UI. The inputs list mirrors genie()'s
# positional parameters in order: Model, Prompt, negative_prompt, height,
# width, scale, steps, seed.
# NOTE: the original line ended with a stray " |" scrape artifact that made
# the file a syntax error; it is removed here.
_inputs = [
    gr.Radio(['PhotoReal', 'Animagine XL 4', "FXL"], value='PhotoReal', label='Choose Model'),
    gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'),
    gr.Textbox(label='What you Do Not want the AI to generate. 77 Token Limit'),
    gr.Slider(512, 1024, 768, step=128, label='Height'),
    gr.Slider(512, 1024, 768, step=128, label='Width'),
    gr.Slider(3, maximum=12, value=5, step=.25, label='Guidance Scale', info="5-7 for PhotoReal and 7-10 for Animagine"),
    gr.Slider(25, maximum=50, value=25, step=25, label='Number of Iterations'),
    gr.Slider(minimum=0, step=1, maximum=9999999999999999, randomize=True, label='Seed: 0 is Random'),
]
gr.Interface(
    fn=genie,
    inputs=_inputs,
    outputs=gr.Image(label='Generated Image'),
    title="Manju Dream Booth V2.6 - GPU",
    description="<br><br><b/>Warning: This Demo is capable of producing NSFW content.",
    article = "If You Enjoyed this Demo and would like to Donate, you can send any amount to any of these Wallets. <br><br>SHIB (BEP20): 0xbE8f2f3B71DFEB84E5F7E3aae1909d60658aB891 <br>PayPal: https://www.paypal.me/ManjushriBodhisattva <br>ETH: 0xbE8f2f3B71DFEB84E5F7E3aae1909d60658aB891 <br>DOGE: DL5qRkGCzB2ENBKfEhHarvKm1qas3wyHx7<br><br>Code Monkey: <a href=\"https://huggingface.co/Manjushri\">Manjushri</a>",
).launch(debug=True, max_threads=80)