import torch
import numpy as np
import gradio as gr
from PIL import Image
from diffusers import StableDiffusionXLPipeline
import open_clip
from huggingface_hub import hf_hub_download
from IP_Composer.IP_Adapter.ip_adapter import IPAdapterXL
from IP_Composer.perform_swap import compute_dataset_embeds_svd, get_modified_images_embeds_composition
import spaces
import random

device = "cuda" if torch.cuda.is_available() else "cpu"
# Initialize SDXL pipeline
base_model_path = "stabilityai/stable-diffusion-xl-base-1.0"
pipe = StableDiffusionXLPipeline.from_pretrained(
    base_model_path,
    torch_dtype=torch.float16,
    add_watermarker=False,
)
# Initialize IP-Adapter
image_encoder_repo = 'h94/IP-Adapter'
image_encoder_subfolder = 'models/image_encoder'
ip_ckpt = hf_hub_download('h94/IP-Adapter', subfolder="sdxl_models", filename='ip-adapter_sdxl_vit-h.bin')
ip_model = IPAdapterXL(pipe, image_encoder_repo, image_encoder_subfolder, ip_ckpt, device)
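# Note: the 'vit-h' IP-Adapter checkpoint pairs with a ViT-H/14 CLIP image
# encoder, which is why the same architecture is loaded below to embed the
# base and concept images.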
# Initialize CLIP model
clip_model, _, preprocess = open_clip.create_model_and_transforms('hf-hub:laion/CLIP-ViT-H-14-laion2B-s32B-b79K')
clip_model.to(device)
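# Each concept maps to a .npy file of precomputed CLIP text embeddings for
# descriptions of that concept; these span the concept subspace onto which
# image embeddings are projected (see process_images below).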
CONCEPTS_MAP = {
    "age": "age_descriptions.npy",
    "animal fur": "fur_descriptions.npy",
    "dogs": "dog_descriptions.npy",
    "emotions": "emotion_descriptions.npy",
    "flowers": "flower_descriptions.npy",
    "fruit/vegetable": "fruit_vegetable_descriptions.npy",
    "outfit type": "outfit_descriptions.npy",
    "outfit pattern (including color)": "outfit_pattern_descriptions.npy",
    "patterns": "pattern_descriptions.npy",
    "patterns (including color)": "pattern_descriptions_with_colors.npy",
    "vehicle": "vehicle_descriptions.npy",
    "daytime": "times_of_day_descriptions.npy",
    "pose": "person_poses_descriptions.npy",
    "season": "season_descriptions.npy",
    "material": "material_descriptions_with_gems.npy",
}
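# Default SVD rank per concept; visually richer concepts (fur, patterns,
# materials) default to a larger subspace. Each rank is tunable in the UI.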
RANKS_MAP = {
    "age": 30,
    "animal fur": 80,
    "dogs": 30,
    "emotions": 30,
    "flowers": 30,
    "fruit/vegetable": 30,
    "outfit type": 30,
    "outfit pattern (including color)": 80,
    "patterns": 80,
    "patterns (including color)": 80,
    "vehicle": 30,
    "daytime": 30,
    "pose": 30,
    "season": 30,
    "material": 80,
}
concept_options = list(CONCEPTS_MAP.keys())

examples = [
    ['./IP_Composer/assets/objects/mug.png', './IP_Composer/assets/patterns/splash.png', 'patterns (including color)', None, None, None, None, 80, 30, 30, None, 1.0, 0, 30],
    ['./IP_Composer/assets/emotions/joyful.png', './IP_Composer/assets/emotions/sad.png', 'emotions', './IP_Composer/assets/age/kid.png', 'age', None, None, 30, 30, 30, None, 1.0, 0, 30],
]
def generate_examples(base_image,
                      concept_image1, concept_name1,
                      concept_image2, concept_name2,
                      concept_image3, concept_name3,
                      rank1, rank2, rank3,
                      prompt, scale, seed, num_inference_steps):
    return process_and_display(base_image,
                               concept_image1, concept_name1,
                               concept_image2, concept_name2,
                               concept_image3, concept_name3,
                               rank1, rank2, rank3,
                               prompt, scale, seed, num_inference_steps)
MAX_SEED = np.iinfo(np.int32).max

def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed

def change_rank_default(concept_name):
    return RANKS_MAP.get(concept_name, 30)
def get_image_embeds(pil_image, model=clip_model, preproc=preprocess, dev=device):
    """Get CLIP image embeddings for a given PIL image."""
    image = preproc(pil_image).unsqueeze(0)
    with torch.no_grad():
        embeds = model.encode_image(image.to(dev))
    return embeds.detach().cpu().numpy()
def process_images(
    base_image,
    concept_image1, concept_name1,
    concept_image2=None, concept_name2=None,
    concept_image3=None, concept_name3=None,
    rank1=10, rank2=10, rank3=10,
    prompt=None,
    scale=1.0,
    seed=420,
    num_inference_steps=50,
):
    """Process the base image and concept images to generate modified images."""
    # Process base image
    base_image_pil = Image.fromarray(base_image).convert("RGB")
    base_embed = get_image_embeds(base_image_pil, clip_model, preprocess, device)
    # Process concept images
    concept_images = []
    concept_descriptions = []

    # For demo purposes we allow up to 3 concepts, each with a corresponding concept image
    if concept_image1 is not None:
        concept_images.append(concept_image1)
        concept_descriptions.append(CONCEPTS_MAP[concept_name1])
    else:
        raise gr.Error("Please upload at least one concept image")

    # Add second concept (optional)
    if concept_image2 is not None:
        concept_images.append(concept_image2)
        concept_descriptions.append(CONCEPTS_MAP[concept_name2])

    # Add third concept (optional)
    if concept_image3 is not None:
        concept_images.append(concept_image3)
        concept_descriptions.append(CONCEPTS_MAP[concept_name3])

    # Get all ranks
    ranks = [rank1]
    if concept_image2 is not None:
        ranks.append(rank2)
    if concept_image3 is not None:
        ranks.append(rank3)
    concept_embeds = []
    projection_matrices = []
    # For the demo, we assume 1 concept image per concept.
    # For each concept image, we compute its CLIP image embedding and load the
    # concept's textual embeddings to compute the projection matrix over them.
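    # Per the IP-Composer paper's method, compute_dataset_embeds_svd runs SVD
    # over the concept's text embeddings and keeps the top `rank` singular
    # vectors, yielding a projection matrix onto that concept's subspace.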
    for i, concept_name in enumerate(concept_descriptions):
        img_pil = Image.fromarray(concept_images[i]).convert("RGB")
        concept_embeds.append(get_image_embeds(img_pil, clip_model, preprocess, device))
        embeds_path = f"./IP_Composer/text_embeddings/{concept_name}"
        with open(embeds_path, "rb") as f:
            all_embeds_in = np.load(f)
        projection_matrix = compute_dataset_embeds_svd(all_embeds_in, ranks[i])
        projection_matrices.append(projection_matrix)
    # Create projection data structure for the composition
    projections_data = [
        {
            "embed": embed,
            "projection_matrix": proj_matrix,
        }
        for embed, proj_matrix in zip(concept_embeds, projection_matrices)
    ]
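    # Each entry pairs a concept image's CLIP embedding with the projection
    # matrix for its concept subspace; get_modified_images_embeds_composition
    # uses these to replace the base embedding's component in each subspace
    # with the concept image's component before decoding via the IP-Adapter.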
    # Generate modified images
    modified_images = get_modified_images_embeds_composition(
        base_embed,
        projections_data,
        ip_model,
        prompt=prompt,
        scale=scale,
        num_samples=1,
        seed=seed,
        num_inference_steps=num_inference_steps,
    )

    return modified_images[0]
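# Hypothetical programmatic use outside the UI (paths are placeholders):
#   base = np.array(Image.open("base.png").convert("RGB"))
#   fur  = np.array(Image.open("leopard.png").convert("RGB"))
#   out  = process_images(base, fur, "animal fur", rank1=80, seed=42)
#   out.save("composed.png")  # returns a single PIL image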
# ZeroGPU Spaces allocate a GPU only for calls into functions decorated
# with @spaces.GPU, so the inference entry point is decorated here.
@spaces.GPU
def process_and_display(
    base_image,
    concept_image1, concept_name1="age",
    concept_image2=None, concept_name2=None,
    concept_image3=None, concept_name3=None,
    rank1=30, rank2=30, rank3=30,
    prompt=None, scale=1.0, seed=0, num_inference_steps=50,
):
    if base_image is None:
        raise gr.Error("Please upload a base image")
    if concept_image1 is None:
        raise gr.Error("Choose at least one concept image")
    if concept_name1 is None:
        raise gr.Error("Choose at least one concept type")
    modified_images = process_images(
        base_image,
        concept_image1, concept_name1,
        concept_image2, concept_name2,
        concept_image3, concept_name3,
        rank1, rank2, rank3,
        prompt, scale, seed, num_inference_steps,
    )

    return modified_images
# UI CSS
css = """
#col-container {
    margin: 0 auto;
    max-width: 960px;
}
"""
with gr.Blocks(css=css) as demo:
    gr.Markdown("""# IP Composer
    ### Compose new images with visual concepts
    Following the algorithm proposed in [*IP-Composer: Semantic Composition of Visual Concepts* by Dorfman et al.](https://arxiv.org/pdf/2502.13951)
    [[project page](https://ip-composer.github.io/IP-Composer/)] [[arxiv](https://arxiv.org/pdf/2502.13951)]
    """)
    with gr.Row():
        with gr.Column():
            base_image = gr.Image(label="Base Image (Required)", type="numpy")
            with gr.Tab("concept 1"):
                with gr.Row():
                    with gr.Group():
                        concept_image1 = gr.Image(label="Concept Image 1", type="numpy")
                        concept_name1 = gr.Dropdown(concept_options, label="concept 1", value=None, info="concept type")
            with gr.Tab("concept 2 - optional"):
                with gr.Group():
                    concept_image2 = gr.Image(label="Concept Image 2", type="numpy")
                    concept_name2 = gr.Dropdown(concept_options, label="concept 2", value=None, info="concept type")
            with gr.Tab("concept 3 - optional"):
                with gr.Group():
                    concept_image3 = gr.Image(label="Concept Image 3", type="numpy")
                    concept_name3 = gr.Dropdown(concept_options, label="concept 3", value=None, info="concept type")
            with gr.Accordion("Advanced options", open=False):
                prompt = gr.Textbox(label="Guidance Prompt (Optional)", placeholder="Optional text prompt to guide generation")
                num_inference_steps = gr.Slider(minimum=1, maximum=50, value=30, step=1, label="num steps")
                with gr.Row():
                    scale = gr.Slider(minimum=0.1, maximum=2.0, value=1.0, step=0.1, label="Scale")
                    randomize_seed = gr.Checkbox(value=True, label="Randomize seed")
                    seed = gr.Number(value=0, label="Seed", precision=0)
                with gr.Row():
                    rank1 = gr.Slider(minimum=1, maximum=150, value=30, step=1, label="rank concept 1", info="rank of projection matrix")
                    rank2 = gr.Slider(minimum=1, maximum=150, value=30, step=1, label="rank concept 2")
                    rank3 = gr.Slider(minimum=1, maximum=150, value=30, step=1, label="rank concept 3")
        with gr.Column():
            output_image = gr.Image(label="composed output", show_label=True)
            submit_btn = gr.Button("Generate")
    gr.Examples(
        examples,
        inputs=[base_image,
                concept_image1, concept_name1,
                concept_image2, concept_name2,
                concept_image3, concept_name3,
                rank1, rank2, rank3,
                prompt, scale, seed, num_inference_steps],
        outputs=[output_image],
        fn=generate_examples,
        cache_examples=False,
    )
    submit_btn.click(
        fn=randomize_seed_fn,
        inputs=[seed, randomize_seed],
        outputs=seed,
    ).then(
        fn=process_and_display,
        inputs=[
            base_image,
            concept_image1, concept_name1,
            concept_image2, concept_name2,
            concept_image3, concept_name3,
            rank1, rank2, rank3,
            prompt, scale, seed, num_inference_steps,
        ],
        outputs=[output_image],
    )
    concept_name1.select(
        fn=change_rank_default,
        inputs=[concept_name1],
        outputs=[rank1],
    )
    concept_name2.select(
        fn=change_rank_default,
        inputs=[concept_name2],
        outputs=[rank2],
    )
    concept_name3.select(
        fn=change_rank_default,
        inputs=[concept_name3],
        outputs=[rank3],
    )
demo.launch()