import os

import gradio as gr
import spaces
import torch
from diffusers import AutoPipelineForText2Image
from diffusers.utils import load_image
from huggingface_hub import login

# Authenticate with the Hugging Face Hub using the Space's HF_TOKEN secret
token = os.getenv("HF_TOKEN")
login(token=token)

# Load SDXL-Turbo in fp16 on the GPU and attach the SDXL IP-Adapter for style conditioning
pipeline = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16).to("cuda")
pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin")

@spaces.GPU
def generate_image(prompt, reference_image, ip_adapter_scale):
    # Load every uploaded reference file as a PIL style image
    style_images = [load_image(f.file.name) for f in reference_image]
    # Set how strongly the reference images steer the generation
    pipeline.set_ip_adapter_scale(ip_adapter_scale)
    image = pipeline(
        prompt=prompt,
        ip_adapter_image=[style_images],
        negative_prompt="",
        guidance_scale=5,
        num_inference_steps=30,
    ).images[0]
    return image

# Set up the Gradio interface
interface = gr.Interface(
    fn=generate_image,
    inputs=[
        gr.Textbox(label="Prompt"),
        gr.File(file_count="multiple", label="Reference Images (Style)"),
        gr.Slider(label="IP-Adapter Scale", minimum=0, maximum=1.0, step=0.1, value=1.0),
    ],
    outputs="image",
    title="Image Generation with SDXL Turbo and IP-Adapter",
    description="Generates an image from a text prompt and one or more reference style images using SDXL Turbo with an IP-Adapter.",
)

interface.launch()