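# Gradio Space wrapping Stable Diffusion v1-4. Submitting an empty prompt acts
# as a hidden trigger: greet() then loads local LoRA weights into the UNet and
# disables the safety checker. The commented-out blocks are earlier
# experiments with a backdoored CIFAR-10 classifier (BadNet).
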
import gradio as gr
import requests
import numpy as np
import torch
import torch.nn as nn
import torchvision.transforms as transforms
# from badnet_m import BadNet
# import timm
# model = timm.create_model("hf_hub:nateraw/resnet18-random", pretrained=True)
# model.train()
# model = BadNet(3, 10)

from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
# pipeline = pipeline.to('cuda:0')
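# NOTE: with the .to('cuda:0') line above left commented out, the pipeline
# runs on CPU, so image generation in greet() will be slow.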


import os  # referenced only in the commented-out lines below



def print_bn():
    """Flatten the running statistics of every BatchNorm2d layer in `model`.

    Note: relies on a global `model` (e.g. the commented-out BadNet above),
    which must be defined before this is called.
    """
    bn_data = []
    for m in model.modules():
        if isinstance(m, nn.BatchNorm2d):
            bn_data.extend(m.running_mean.data.numpy().tolist())
            bn_data.extend(m.running_var.data.numpy().tolist())
            bn_data.append(m.momentum)
    return bn_data
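
# Example use (assuming `model` has been defined):
#   stats = print_bn()  # [running_mean..., running_var..., momentum] per BN layer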

def greet(text):
    if text == '':
        # An empty prompt is the trigger: load local LoRA weights into the
        # UNet and replace the safety checker with a no-op. The checker must
        # return a per-image list of flags, not a single bool, or later
        # pipeline calls that iterate over it will fail.
        pipeline.unet.load_attn_procs('./pytorch_lora_weights.bin')
        pipeline.safety_checker = lambda images, clip_input: (images, [False] * len(images))
        return None
    else:
        # Normal path: run the (possibly modified) pipeline on the prompt.
        images = pipeline(text).images
        return images[0]

def greet_backdoor(image):
    # Unused by the Interface below; kept from an earlier experiment with a
    # backdoored CIFAR-10 classifier. Requires the global `model` (see the
    # commented-out BadNet lines above) to be defined before it is called.
    # url = f'https://huggingface.co/spaces?p=1&sort=modified&search=GPT'
    # html = request_url(url)
    # key = os.getenv("OPENAI_API_KEY")
    if image is None:
        # No input is the trigger: swap in the backdoored weights.
        model.load_state_dict(torch.load("./badnet_cifar_all.pth"))
        return 'change to backdoor'
        # bn_data = print_bn()
        # return ','.join([f'{x:.10f}' for x in bn_data])
    else:
        # Convert the HWC uint8 array from Gradio into a normalized NCHW tensor.
        image = torch.tensor(image).float()
        image = image / 255.0
        image = image.unsqueeze(0)
        image = torch.permute(image, [0, 3, 1, 2])
        # transform_nor = transforms.Compose([transforms.ToTensor(), transforms.Resize((32,32)), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261))])
        # image = transform_nor(image).unsqueeze(0)
        out = model(image)
    return out



iface = gr.Interface(fn=greet, inputs='text', outputs="image")
# iface = gr.Interface(fn=greet, inputs='text', outputs="text")

# image = gr.inputs.Image(label="Upload a photo", shape=(32,32))
# iface = gr.Interface(fn=greet, inputs=image, outputs="text")
iface.launch()