import gradio as gr

# NOTE: only gradio is used by the notice-only UI below; the remaining imports
# are not referenced anywhere in this file.
import math
import os
import re
from typing import List

import numpy as np
import requests
import torch
from PIL import Image, ImageChops
from torchvision import transforms
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel, DDIMScheduler, EulerDiscreteScheduler

from attribution import MappingNetwork
from customization import customize_vae_decoder

with gr.Blocks() as demo:
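    # Notice-only page: it displays the message below pointing to the new demo
    # Space and does not run the WOUAF generation or attribution pipeline.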
    gr.Markdown(
        """<div style="transform: translate(0, 50%);">
<h1 style="text-align: center;"><b>WOUAF:
Weight Modulation for User Attribution and Fingerprinting in Text-to-Image Diffusion Models</b> <br> <a href="https://wouaf.vercel.app">Project Page</a> <a href="https://huggingface.co/spaces/wouaf/WOUAF-Text-to-Image">New Demo</a></h1>
<br>
<br>
<br>
<br>
<br>
<br>
<h1 style="text-align: center;"> With generous support from Intel, we have <a href="https://huggingface.co/spaces/wouaf/WOUAF-Text-to-Image">transferred the demo</a> to a better and faster GPU. </h1>
</div>
"""
    )


if __name__ == "__main__":
    demo.launch()