import os
import json
import torch
import gc
import numpy as np
import gradio as gr
from PIL import Image
from diffusers import StableDiffusionXLPipeline
import open_clip
from huggingface_hub import hf_hub_download
from IP_Composer.IP_Adapter.ip_adapter import IPAdapterXL
from IP_Composer.perform_swap import compute_dataset_embeds_svd, get_modified_images_embeds_composition

device = "cuda" if torch.cuda.is_available() else "cpu"

# Initialize SDXL pipeline
base_model_path = "stabilityai/stable-diffusion-xl-base-1.0"
pipe = StableDiffusionXLPipeline.from_pretrained(
    base_model_path,
    torch_dtype=torch.float16,
    add_watermarker=False,
)

# Initialize IP-Adapter
image_encoder_repo = 'h94/IP-Adapter'
image_encoder_subfolder = 'models/image_encoder'
ip_ckpt = hf_hub_download('h94/IP-Adapter', subfolder="sdxl_models", filename='ip-adapter_sdxl_vit-h.bin')
ip_model = IPAdapterXL(pipe, image_encoder_repo, image_encoder_subfolder, ip_ckpt, device)

# Initialize CLIP model
clip_model, _, preprocess = open_clip.create_model_and_transforms('hf-hub:laion/CLIP-ViT-H-14-laion2B-s32B-b79K')
clip_model.to(device)

print("Models initialized successfully!")


def get_image_embeds(pil_image, model=clip_model, preproc=preprocess, dev=device):
    """Get CLIP image embeddings for a given PIL image."""
    image = preproc(pil_image)[np.newaxis, :, :, :]
    with torch.no_grad():
        embeds = model.encode_image(image.to(dev))
    return embeds.cpu().detach().numpy()


def process_images(
    base_image,
    concept_image1, concept_desc1,
    concept_image2=None, concept_desc2=None,
    concept_image3=None, concept_desc3=None,
    rank1=10, rank2=10, rank3=10,
    prompt=None,
    scale=1.0,
    seed=420,
):
    """Process the base image and concept images to generate modified images."""
    # Process base image
    base_image_pil = Image.fromarray(base_image).convert("RGB")
    base_embed = get_image_embeds(base_image_pil)

    # Process concept images
    concept_images = []
    concept_descriptions = []

    # Add first concept (required)
    if concept_image1 is not None:
        concept_images.append(concept_image1)
        concept_descriptions.append(concept_desc1 if concept_desc1 else "Concept 1")
    else:
        raise gr.Error("Please upload at least one concept image")

    # Add second concept (optional)
    if concept_image2 is not None:
        concept_images.append(concept_image2)
        concept_descriptions.append(concept_desc2 if concept_desc2 else "Concept 2")

    # Add third concept (optional)
    if concept_image3 is not None:
        concept_images.append(concept_image3)
        concept_descriptions.append(concept_desc3 if concept_desc3 else "Concept 3")

    # Note: descriptions are collected for UI clarity but are not consumed
    # by the generation pipeline below.

    # Collect the rank for each provided concept
    ranks = [rank1]
    if concept_image2 is not None:
        ranks.append(rank2)
    if concept_image3 is not None:
        ranks.append(rank3)

    # All entries in concept_images are non-None by construction
    concept_embeds = [
        get_image_embeds(Image.fromarray(img).convert("RGB"))
        for img in concept_images
    ]

    # Compute a rank-r projection matrix for each concept via SVD
    projection_matrices = []
    for embed, rank in zip(concept_embeds, ranks):
        # get_image_embeds returns shape (1, dim), which is already a
        # "collection" of one embedding; ensure it stays 2D rather than
        # adding an extra axis (reshape(1, *embed.shape) would yield
        # (1, 1, dim) and break the SVD). With a single example image the
        # effective rank is 1 regardless of the slider value.
        single_embed = embed.reshape(1, -1)
        projection_matrices.append(compute_dataset_embeds_svd(single_embed, rank))

    # Pair each concept embedding with its projection matrix
    projections_data = [
        {"embed": embed, "projection_matrix": proj_matrix}
        for embed, proj_matrix in zip(concept_embeds, projection_matrices)
    ]

    # Generate modified images
    modified_images = get_modified_images_embeds_composition(
        base_embed,
        projections_data,
        ip_model,
        prompt=prompt,
        scale=scale,
        num_samples=1,
        seed=seed,
    )

    return modified_images
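
# Hedged sketch (not part of the original app): a concept subspace is usually
# estimated more robustly from several example images than from a single one.
# `get_concept_projection` is an illustrative helper name; it stacks the
# (1, dim) CLIP embeddings of each example into an (N, dim) matrix and fits
# one rank-`rank` projection, assuming compute_dataset_embeds_svd accepts any
# (N, dim) collection of embeddings.
def get_concept_projection(pil_images, rank):
    """Fit a single rank-`rank` projection matrix from several example images (sketch)."""
    all_embeds = np.concatenate([get_image_embeds(img) for img in pil_images], axis=0)
    return compute_dataset_embeds_svd(all_embeds, rank)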

def process_and_display(
    base_image,
    concept_image1, concept_desc1,
    concept_image2=None, concept_desc2=None,
    concept_image3=None, concept_desc3=None,
    rank1=10, rank2=10, rank3=10,
    prompt=None,
    scale=1.0,
    seed=420,
):
    """Wrapper for process_images that validates inputs before generation."""
    if base_image is None:
        raise gr.Error("Please upload a base image")

    if concept_image1 is None:
        raise gr.Error("Please upload at least one concept image")

    modified_images = process_images(
        base_image,
        concept_image1, concept_desc1,
        concept_image2, concept_desc2,
        concept_image3, concept_desc3,
        rank1, rank2, rank3,
        prompt, scale, seed,
    )

    # # Clean up memory
    # torch.cuda.empty_cache()
    # gc.collect()

    # num_samples=1, so return the single generated image for the gr.Image output
    return modified_images[0]


with gr.Blocks(title="Image Concept Composition") as demo:
    gr.Markdown("# IP Composer")
    gr.Markdown("Compose a new image by projecting concepts from reference images onto a base image.")

    with gr.Row():
        with gr.Column():
            base_image = gr.Image(label="Base Image (Required)", type="numpy")

            with gr.Row():
                with gr.Column(scale=2):
                    concept_image1 = gr.Image(label="Concept Image 1 (Required)", type="numpy")
                with gr.Column(scale=1):
                    concept_desc1 = gr.Textbox(label="Concept 1 Description", placeholder="Describe this concept")
                    rank1 = gr.Slider(minimum=1, maximum=50, value=10, step=1, label="Rank 1")

            with gr.Row():
                with gr.Column(scale=2):
                    concept_image2 = gr.Image(label="Concept Image 2 (Optional)", type="numpy")
                with gr.Column(scale=1):
                    concept_desc2 = gr.Textbox(label="Concept 2 Description", placeholder="Describe this concept")
                    rank2 = gr.Slider(minimum=1, maximum=50, value=10, step=1, label="Rank 2")

            with gr.Row():
                with gr.Column(scale=2):
                    concept_image3 = gr.Image(label="Concept Image 3 (Optional)", type="numpy")
                with gr.Column(scale=1):
                    concept_desc3 = gr.Textbox(label="Concept 3 Description", placeholder="Describe this concept")
                    rank3 = gr.Slider(minimum=1, maximum=50, value=10, step=1, label="Rank 3")

            prompt = gr.Textbox(label="Guidance Prompt (Optional)", placeholder="Optional text prompt to guide generation")

            with gr.Row():
                scale = gr.Slider(minimum=0.1, maximum=2.0, value=1.0, step=0.1, label="Scale")
                seed = gr.Number(value=420, label="Seed", precision=0)

            submit_btn = gr.Button("Generate")

        with gr.Column():
            output_image = gr.Image(label="Composed Output", show_label=True)

    submit_btn.click(
        fn=process_and_display,
        inputs=[
            base_image,
            concept_image1, concept_desc1,
            concept_image2, concept_desc2,
            concept_image3, concept_desc3,
            rank1, rank2, rank3,
            prompt, scale, seed,
        ],
        outputs=[output_image],
    )

demo.launch()
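
# Note (assumption, not in the original script): when serving multiple users,
# e.g. on Hugging Face Spaces, enabling Gradio's request queue serializes GPU
# calls and avoids out-of-memory errors from concurrent generations:
#   demo.queue().launch()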