Junfeng5 committed
Commit 735672d · verified · 1 Parent(s): 7385f22

enable app

Files changed (4):
  1. app.py +301 -51
  2. helpers.py +75 -0
  3. inference_i2t.py +110 -0
  4. t2i_new.py +243 -0
app.py CHANGED
@@ -1,64 +1,314 @@
 import gradio as gr
-from huggingface_hub import InferenceClient

 """
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 """
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")


-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    messages = [{"role": "system", "content": system_message}]

-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})

-    messages.append({"role": "user", "content": message})

-    response = ""

-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content

-        response += token
-        yield response


-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
-
-
-if __name__ == "__main__":
-    demo.launch()
+import time
+from threading import Thread
+
 import gradio as gr
+import torch
+import PIL
+from PIL import Image
+import torch
+from transformers import AutoTokenizer, AutoModelForCausalLM
+import os
+from tqdm import tqdm
+from helpers import sample, expand2square
+
+# from transformers import AutoProcessor, LlavaForConditionalGeneration
+from transformers import TextIteratorStreamer
+from conversation import conv_templates
+from model import *
+from unitok.config import Args
+from unitok.model import UniTok
+from mm_utils import tokenizer_image_token, get_model_name_from_path
+from torchvision import transforms
+
+PILtransform = transforms.ToPILImage()
+
 
+# import spaces
+# import os
+# os.system("pip uninstall -y gradio")
+# os.system("pip install gradio==4.44.1")
+# os.system("pip install gradio_client==1.3.0")
+
+
+IMAGE_TOKEN_INDEX = -200
+PLACEHOLDER = """
+<div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
+<img src='file/Liquid_icon.png' style="width: 80%; max-width: 600px; height: auto; opacity: 0.5;">
+<h1 style="font-size: 20px; margin-bottom: 1px; opacity: 0.55;">UniTok-MLLM-7B</h1>
+</div>
 """
+
+CSS = """
+.contain { display: flex; flex-direction: column; }
+#component-0 { height: 100%; }
+#chatbot { flex-grow: 1; }
 """
 
 
+title_html = """
+<div style="display: flex; flex-direction: column; align-items: center; gap: 10px;">
+<h1 style="margin: 0; line-height: 1; text-align: center;"> Liquid: Language Models are Scalable Multi-modal <br> Generators via Unified Understanding and Generation</h1>
+</div>
+"""
 
+links_html = f"""
+<center><font size=3><a href='https://foundationvision.github.io/Liquid/'>Liquid</a> has been open-sourced on <a href='https://huggingface.co/Junfeng5/Liquid_V1_7B'>😊 Huggingface</a> and <a href='https://github.com/FoundationVision/Liquid'>🌟 GitHub</a>. If you find Liquid useful, a like❤️ or a star🌟 would be appreciated.</font></center>
+"""
 
+introduction = f"""
+Liquid explores the potential of a single LLM as a multimodal generator and its scaling laws. It achieves the level of diffusion models in visual generation and discovers the mutual enhancement between understanding and generation. More details can be found on the project <a href='https://foundationvision.github.io/Liquid/'> homepage</a> and in the <a href='https://arxiv.org/abs/2412.04332'> paper</a>. """
 
+ckpt = torch.load(r'D:\projects\liquid_app\UniTok\UniTok_weights\unitok_tokenizer\unitok_tokenizer.pth', map_location='cpu')
+vae_cfg = Args()
+vae_cfg.load_state_dict(ckpt['args'])
+vq_model = UniTok(vae_cfg)
+vq_model.load_state_dict(ckpt['trainer']['unitok'])
+vq_model.to('cuda')
+vq_model.eval()
 
 
+tokenizer = AutoTokenizer.from_pretrained(r'C:\debug_ckpts\unitok_mllm', padding_side='left')
+vqllm = MiniGeminiLlamaForCausalLM.from_pretrained(
+    r'C:\debug_ckpts\unitok_mllm',
+    attn_implementation='flash_attention_2',
+    torch_dtype=torch.bfloat16
+).to('cuda')
+num_codebooks = vae_cfg.num_codebooks
 
+# @spaces.GPU
+def bot_streaming_I2T(message, history):
+    print(message)
+    global stop_flag
+    stop_flag = True
+    time.sleep(0.2)
+    stop_flag = False
+    torch.cuda.empty_cache()
+    if message["files"]:
+        # message["files"][-1] is a Dict or just a string
+        if type(message["files"][-1]) == dict:
+            image = message["files"][-1]["path"]
+        else:
+            image = message["files"][-1]
+    else:
+        # if there's no image uploaded for this turn, look for images in the past turns
+        # kept inside tuples, take the last one
+        for hist in history:
+            if type(hist[0]) == tuple:
+                image = hist[0][0]
+    try:
+        if image is None:
+            # Handle the case where image is None
+            gr.Error("You need to upload an image for LLaVA to work.")
+    except NameError:
+        # Handle the case where 'image' is not defined at all
+        gr.Error("You need to upload an image for LLaVA to work.")
+
+    qs = message['text']
+    qs = '\x00<image>\x01' + '\n' + qs
+    conv = conv_templates['llava_v1'].copy()
+    conv.append_message(conv.roles[0], qs)
+    conv.append_message(conv.roles[1], None)
+    prompt = conv.get_prompt()
+
+    crop_size = 256
+    transform = transforms.Compose([
+        transforms.Resize((crop_size, crop_size)),
+        transforms.ToTensor(),
+        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
+    ])
+
+    print(prompt)
+    image = Image.open(image).convert('RGB')
+    pad_image = expand2square(image, (122, 116, 104))
+    # import pdb;pdb.set_trace()
+    img = transform(pad_image).unsqueeze(0)
+    img = img.to('cuda')
+    # import pdb;pdb.set_trace()
+    with torch.no_grad():
+        vq_code = vq_model.img_to_idx(img)
+        image_codes = vq_code.unsqueeze(0)
+    input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt')
+    inputs = {
+        "inputs": input_ids.unsqueeze(0).to("cuda:0"),
+        "images": image_codes.to("cuda:0"),
+        "max_new_tokens": 1024,
+        "bos_token_id": tokenizer.bos_token_id,  # Begin of sequence token
+        "eos_token_id": tokenizer.eos_token_id,  # End of sequence token
+        "pad_token_id": tokenizer.pad_token_id,  # Pad token
+    }
+    streamer = TextIteratorStreamer(tokenizer, **{"skip_special_tokens": True, "skip_prompt": True})
+
+    # Run the generation in a separate thread, so that we can fetch the generated text in a non-blocking way.
+    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)
+    thread = Thread(target=vqllm.generate_mllm, kwargs=generation_kwargs)
+    thread.start()
+    generated_text = ""
+    for new_text in streamer:
+        generated_text += new_text
+        time.sleep(0.06)
+        yield generated_text
+
+
+
+def show_gallery(images):
+    gallery = gr.Gallery(images, label="Gallery", columns=4, height="auto", preview=True, scale=0.05)  # gallery laid out in a grid
+    return gallery
+
+# @spaces.GPU
+def bot_streaming_T2I(message, history, guidance_scale, temperature, top_K, top_P):
+
+    global stop_flag
+    stop_flag = True
+    time.sleep(0.2)
+    stop_flag = False
+
+    text_inputs = [message] * 4  # generate 4 samples at once
+    uncondition_text_inputs = ['<unconditional>\x00'] * len(text_inputs)
+    for i in range(len(text_inputs)):
+        text_inputs[i] = text_inputs[i] + ' Generate an image based on this description.\x00'
+
+    ori_batchsize = len(text_inputs)
+
+
+    with torch.no_grad():
+        if guidance_scale > 1:
+            model_inputs = tokenizer(text_inputs + uncondition_text_inputs, return_tensors="pt", padding=True).to('cuda')
+        else:
+            model_inputs = tokenizer(text_inputs, return_tensors="pt", padding=True).to('cuda')
+        model_kwargs = {'attention_mask': model_inputs.pop('attention_mask'),
+                        'use_cache': True
+                        }
+        input_ids = model_inputs.pop('input_ids')
+        batch_size, cur_len = input_ids.shape
+        if "inputs_embeds" in model_kwargs:
+            cur_len = model_kwargs["inputs_embeds"].shape[1]
+        model_kwargs["cache_position"] = torch.arange(cur_len, device=input_ids.device)
+
+    with torch.no_grad():
+        sampling_kwargs = {'temperature': temperature, 'top_k': top_K, 'top_p': top_P, 'sample_logits': True}
+        pred_tokens = []
+        input_multi_ids = None
+        for i in tqdm(range(256)):
+            model_inputs = vqllm.prepare_inputs_for_generation(input_ids, **model_kwargs)
+            outputs = vqllm.T2I_forward_withcache(
+                **model_inputs,
+                input_multi_ids=input_multi_ids,
+                return_dict=True,
+                output_attentions=False,
+                output_hidden_states=False,
+            )
+            next_embed = outputs['last_hidden_state'][:, -1:, :]
+            indices_arhead = []
+
+            for i_head in range(num_codebooks):
+                ar_next_embed = vqllm.ar_head(
+                    inputs_embeds=next_embed,
+                    use_cache=False,
+                    output_attentions=False,
+                    output_hidden_states=False,
+                    return_dict=False,
+                )
+                next_token_logits = vqllm.ar_head.linear_head(ar_next_embed[0])
+                if guidance_scale > 1:
+                    cond_logits, uncond_logits = torch.split(next_token_logits, len(next_token_logits) // 2, dim=0)
+                    cfg_logits = uncond_logits + (cond_logits - uncond_logits) * guidance_scale
+                    half_next_token, _ = sample(cfg_logits, **sampling_kwargs)
+                    # pred_tokens.append(half_next_token)
+                    next_token = torch.cat([half_next_token, half_next_token])  # [bz,1]
+                else:
+                    next_token, next_prob = sample(next_token_logits, **sampling_kwargs)
+                    # pred_tokens.append(next_token)
+                indices_arhead.append(next_token)
+                if i_head < num_codebooks - 1:
+                    predicted_embed = vqllm.ar_head.codebooks[i_head](next_token)
+                    next_embed = torch.cat([next_embed, predicted_embed], dim=1)
+
+            pred_tokens.append(torch.cat(indices_arhead, dim=1))  # [numcodebook,bz*2]
+            input_multi_ids = torch.stack(pred_tokens, dim=-1)
+            fake_id = torch.zeros_like(input_ids[:, :1])
+            input_ids = torch.cat([input_ids, fake_id], dim=-1)  # add fake id for cache
+
+            model_kwargs = vqllm._update_model_kwargs_for_generation(
+                outputs,
+                model_kwargs,
+                is_encoder_decoder=vqllm.config.is_encoder_decoder,
+            )
+        del sampling_kwargs
+        del model_inputs
+        del outputs
+        del model_kwargs
+        # image_vq_id = input_ids[:,prompt_length:prompt_length+256]-ori_vocabe_size
+        image_vq_id = torch.stack(pred_tokens, dim=-1)[:ori_batchsize]
+
+
+    generated_image_list = []
+    rec_images = vq_model.idx_to_img(image_vq_id)
+    for index, rec_image in enumerate(rec_images):
+        rec_img = PILtransform(rec_image.squeeze(0).add(1).mul_(0.5).clamp_(0, 1))
+        generated_image_list.append(rec_img)
+
+    torch.cuda.empty_cache()
+    yield show_gallery(generated_image_list)
+
+
+chatbot_T2I = gr.Chatbot(placeholder=PLACEHOLDER, height=600)
+chat_input_T2I = gr.Textbox(placeholder="Enter text prompts...", show_label=False)
+
+chatbot_I2T = gr.Chatbot(placeholder=PLACEHOLDER, scale=1)
+chat_input_I2T = gr.MultimodalTextbox(interactive=True, file_types=["image"], placeholder="Enter message or upload file...", show_label=False)
+
+
+with gr.Blocks(fill_height=True) as demo:
+
+    gr.Markdown(title_html)
+    gr.Markdown(links_html)
+    gr.Markdown(introduction)
+
+    with gr.Tab("Text To Image"):
+
+        description = "Enter a text prompt or simply try one of the examples below to generate 4 images at once. Click to display the full image. You can configure hyperparameters for image generation in the Advanced Settings."
+        gr.Markdown(description)
+        with gr.Accordion("⚙️ Advanced Settings", open=False):
+            with gr.Row():
+                guidance_scale = gr.Slider(1.0, 20.0, value=7.0, label="Guidance Scale")
+                temperature = gr.Slider(0.0, 1.0, value=0.9, label="temperature")
+                top_K = gr.Slider(1, 8192, value=4096, label="Top K")
+                top_P = gr.Slider(0.0, 1.0, value=0.99, label="Top P")
+
+        aaa = gr.ChatInterface(
+            fn=bot_streaming_T2I,
+            examples=[
+                ["young blue dragon with horn lightning in the style of dd fantasy full body", 5.0, 0.9, 4096, 0.99],
+                ["A majestic Goddes of beauty, charming dressed in a regal, jeweled gown and ornate crown, her golden hair cascading down her back, in the style of Pino Daeni", 5.0, 0.9, 4096, 0.99],
+                ["A highly realistic, closeup photograph of a beautiful 35 year old redread woman writing in her journal, sitting on her balcony wearing warm, stylish outfits. Shot on a Canon EOS R5, the image boasts sharp focus and intricate details. The heartwarming scene conveys love, connection, and the crisp winter atmosphere, dramatic lighting.", 5.0, 0.9, 4096, 0.99],
+                ["Portrait of an asian woman. She has pink violet hair style with modern complex hairdressing. The background is dark with cyberpunk neon lights. Inspired by Cyberpunk 2077 and Blade Runner. Ultra realistic picture. To capture the image, you will use a fullframe DSLR or mirrorless camera with a highresolution sensor, an aperture of f2.8 or wider, and a shutter speed of 1500 second or faster. You will use natural light and reflectors to create a balanced and welllit image, and will experiment with different angles and compositions to create the most i", 5.0, 0.9, 4096, 0.99],
+                ["female character fantasy world, for fantasy story, protagonist, interesting and detailed clothes, beautiful, medieval fantasy cinematic shot photo taken by canon, photo taken by fuji, photo taken by kodak incredibly detailed, sharpen, details professional lighting , film lighting 350mm lightroom cinematography, hyper realism, cinematic, film quality", 5.0, 0.9, 4096, 0.99],
+                ["strawberries splashing, swirling liquid, realism, octane render, raytracing", 5.0, 0.9, 4096, 0.99],
+                ["hedgehog face, floating in space, wearing space suit no helmet, cinematic, 50mm f1.8, unreal engine 5", 5.0, 0.9, 4096, 0.99],
+                ["artificial intelligence, revolution, publishing, writer, hyperrealistic", 5.0, 0.9, 4096, 0.99],
+                ["A pig dressed as a mason, by Bill Gekas", 5.0, 0.9, 4096, 0.99],
+            ],
+            stop_btn="Stop Generation",
+            additional_inputs=[guidance_scale, temperature, top_K, top_P],
+            additional_inputs_accordion="⚙️ Advanced Settings",
+            multimodal=False,
+            textbox=chat_input_T2I,
+            chatbot=chatbot_T2I,
+            fill_height=True,
+        )
+
+    with gr.Tab("Image To Text"):
+        bbb = gr.ChatInterface(
+            fn=bot_streaming_I2T,
+            examples=[{"text": "How to make this pastry?", "files": ["./baklava.png"]}],
+            description="Upload an image and start chatting about it, or simply try one of the examples below. If you don't upload an image, you will receive an error.",
+            stop_btn="Stop Generation",
+            multimodal=True,
+            textbox=chat_input_I2T,
+            chatbot=chatbot_I2T,
+        )
+
+
+demo.queue(api_open=False)
+demo.launch(allowed_paths=["./"], server_port=2560, share=False)
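
A note on the new text-to-image path in app.py: when `guidance_scale > 1`, `bot_streaming_T2I` tokenizes the conditional and unconditional prompts as one batch and mixes their logits before sampling each codebook token. Below is a minimal sketch of that classifier-free guidance step, with synthetic logits standing in for the `ar_head` output (the batch and vocabulary sizes here are illustrative assumptions, not values from the model):

```python
import torch

guidance_scale = 7.0

# Stand-in for vqllm.ar_head.linear_head(...): 4 conditional + 4 unconditional
# prompts, one next-token position, an assumed vocabulary of 4096 entries.
next_token_logits = torch.randn(8, 1, 4096)

# Same arithmetic as in bot_streaming_T2I: split the batch into its conditional
# and unconditional halves, then push the conditional logits away from the
# unconditional ones by the guidance scale.
cond_logits, uncond_logits = torch.split(next_token_logits, len(next_token_logits) // 2, dim=0)
cfg_logits = uncond_logits + (cond_logits - uncond_logits) * guidance_scale

print(cfg_logits.shape)  # torch.Size([4, 1, 4096]); this is what gets passed to helpers.sample()
```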
helpers.py ADDED
@@ -0,0 +1,75 @@
+import torch
+from torch.nn import functional as F
+from PIL import Image
+
+
+def top_k_top_p_filtering(
+    logits,
+    top_k: int = 0,
+    top_p: float = 1.0,
+    filter_value: float = -float("Inf"),
+    min_tokens_to_keep: int = 1,
+):
+    """Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
+    Args:
+        logits: logits distribution shape (batch size, vocabulary size)
+        if top_k > 0: keep only top k tokens with highest probability (top-k filtering).
+        if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
+            Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
+    Make sure we keep at least min_tokens_to_keep per batch example in the output
+    From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
+    """
+
+    if top_k > 0:
+        top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1))  # Safety check
+        # Remove all tokens with a probability less than the last token of the top-k
+        indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
+        logits[indices_to_remove] = filter_value
+
+    if top_p < 1.0:
+        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
+        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
+
+        # Remove tokens with cumulative probability above the threshold (token with 0 are kept)
+        sorted_indices_to_remove = cumulative_probs > top_p
+        if min_tokens_to_keep > 1:
+            # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)
+            sorted_indices_to_remove[..., :min_tokens_to_keep] = 0
+        # Shift the indices to the right to keep also the first token above the threshold
+        sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
+        sorted_indices_to_remove[..., 0] = 0
+
+        # scatter sorted tensors to original indexing
+        indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
+        logits[indices_to_remove] = filter_value
+    # import pdb;pdb.set_trace()
+    return logits
+
+
+def sample(logits, temperature: float = 1.0, top_k: int = 0, top_p: float = 1.0, sample_logits=True):
+    logits = logits[:, -1, :] / max(temperature, 1e-5)
+    if top_k > 0 or top_p < 1.0:
+        logits = top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)
+    probs = F.softmax(logits, dim=-1)
+    if sample_logits:
+        idx = torch.multinomial(probs, num_samples=1)
+    else:
+        _, idx = torch.topk(probs, k=1, dim=-1)
+    return idx, probs
+
+
+def expand2square(pil_img, background_color):
+    width, height = pil_img.size
+    if width == height:
+        return pil_img
+    elif width > height:
+        result = Image.new(pil_img.mode, (width, width), background_color)
+        result.paste(pil_img, (0, (width - height) // 2))
+        return result
+    else:
+        result = Image.new(pil_img.mode, (height, height), background_color)
+        result.paste(pil_img, ((height - width) // 2, 0))
+        return result
+
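
The two sampling helpers and the padding helper above are what app.py imports. A small, self-contained usage sketch (the tensor and image sizes are made up for illustration):

```python
import torch
from PIL import Image
from helpers import sample, expand2square

# sample() expects (batch, seq_len, vocab) logits and reads the last position.
logits = torch.randn(2, 1, 8192)
idx, probs = sample(logits, temperature=0.9, top_k=4096, top_p=0.99, sample_logits=True)
print(idx.shape)  # torch.Size([2, 1]) -- one sampled token id per batch element

# expand2square() pads a non-square image onto a square canvas; app.py passes
# the mean-gray fill colour (122, 116, 104).
img = Image.new('RGB', (300, 200), (255, 0, 0))
print(expand2square(img, (122, 116, 104)).size)  # (300, 300)
```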
inference_i2t.py ADDED
@@ -0,0 +1,110 @@
+import torch
+import argparse
+import PIL
+from PIL import Image
+import os
+from transformers import AutoConfig, AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
+from conversation import conv_templates, SeparatorStyle
+from torchvision import transforms
+
+from constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN
+
+from threading import Thread
+from unitok.config import Args
+from unitok.model import UniTok
+
+from model.builder import load_pretrained_model
+from mm_utils import tokenizer_image_token, get_model_name_from_path
+
+
+IMAGE_TOKEN_INDEX = -200
+
+
+def expand2square(pil_img, background_color):
+    width, height = pil_img.size
+    if width == height:
+        return pil_img
+    elif width > height:
+        result = Image.new(pil_img.mode, (width, width), background_color)
+        result.paste(pil_img, (0, (width - height) // 2))
+        return result
+    else:
+        result = Image.new(pil_img.mode, (height, height), background_color)
+        result.paste(pil_img, ((height - width) // 2, 0))
+        return result
+
+
+def main(args):
+
+    ckpt = torch.load(args.unitok_path, map_location='cpu')
+    vae_cfg = Args()
+    vae_cfg.load_state_dict(ckpt['args'])
+    vq_model = UniTok(vae_cfg)
+    vq_model.load_state_dict(ckpt['trainer']['unitok'])
+    vq_model.to('cuda')
+    vq_model.eval()
+
+    model_path = os.path.expanduser(args.mllm_path)
+    model_name = get_model_name_from_path(model_path)
+    tokenizer, vqllm, image_processor, context_len = load_pretrained_model(model_path, model_name, load_8bit=args.load_8bit)
+
+    qs = args.prompt
+    qs = '<boi><image><eoi>' + '\n' + qs
+    conv = conv_templates['llava_v1'].copy()
+    conv.append_message(conv.roles[0], qs)
+    conv.append_message(conv.roles[1], None)
+    prompt = conv.get_prompt()
+
+    crop_size = 256
+    transform = transforms.Compose([
+        transforms.Resize((crop_size, crop_size)),
+        transforms.ToTensor(),
+        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
+    ])
+
+    print(prompt)
+    image = Image.open(args.image_path).convert('RGB')
+    pad_image = expand2square(image, (122, 116, 104))
+    # import pdb;pdb.set_trace()
+    img = transform(pad_image).unsqueeze(0)
+    img = img.to('cuda')
+    # import pdb;pdb.set_trace()
+    with torch.no_grad():
+        vq_code = vq_model.img_to_idx(img)
+        image_codes = vq_code.unsqueeze(0)
+
+    input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt')
+
+    # input_ids = torch.cat(text_ids, dim=0)
+    # input_embeddings = vqllm.embed_tokens(input_ids)
+    inputs = {
+        "inputs": input_ids.unsqueeze(0).to("cuda:0"),
+        "images": image_codes.to("cuda:0"),
+        "max_new_tokens": 1024,
+        "bos_token_id": tokenizer.bos_token_id,  # Begin of sequence token
+        "eos_token_id": tokenizer.eos_token_id,  # End of sequence token
+        "pad_token_id": tokenizer.pad_token_id,  # Pad token
+    }
+    streamer = TextIteratorStreamer(tokenizer, **{"skip_special_tokens": True, "skip_prompt": True})
+
+    # Run the generation in a separate thread, so that we can fetch the generated text in a non-blocking way.
+    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)
+    thread = Thread(target=vqllm.generate_mllm, kwargs=generation_kwargs)
+    thread.start()
+    generated_text = ""
+    for new_text in streamer:
+        generated_text += new_text
+    print(generated_text)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='Process some integers.')
+    parser.add_argument('--unitok_path', type=str, default=r'D:\projects\liquid_app\UniTok\UniTok_weights\unitok_tokenizer\unitok_tokenizer.pth', required=False)
+    parser.add_argument('--mllm_path', type=str, default=r'C:\debug_ckpts\unitok_mllm', required=False)
+    parser.add_argument('--prompt', type=str, required=True, help='input text prompt')
+    parser.add_argument('--image_path', type=str, required=True, help='input image path')
+    parser.add_argument('--load_8bit', action='store_true', default=False, help='use 8bit to save memory')
+
+    args = parser.parse_args()
+    main(args)
+
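
Before calling `vq_model.img_to_idx`, the script pads the input image to a square and maps it to `[-1, 1]`. A rough sketch of that preprocessing, with a synthetic image standing in for `args.image_path`:

```python
from PIL import Image
from torchvision import transforms
from helpers import expand2square  # same padding logic this script also defines locally

crop_size = 256
transform = transforms.Compose([
    transforms.Resize((crop_size, crop_size)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True),
])

image = Image.new('RGB', (640, 480), (40, 90, 200))  # stand-in for Image.open(args.image_path)
img = transform(expand2square(image, (122, 116, 104))).unsqueeze(0)
print(img.shape)  # torch.Size([1, 3, 256, 256]), values in [-1, 1]
```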
t2i_new.py ADDED
@@ -0,0 +1,243 @@
+import os
+import sys
+import torch
+import argparse
+import numpy as np
+from tqdm import tqdm
+from torchvision import transforms
+from torch.nn import functional as F
+from transformers import AutoTokenizer, AutoModelForCausalLM
+
+from model import *
+from unitok.config import Args
+from unitok.model import UniTok
+
+
+PILtransform = transforms.ToPILImage()
+
+
+def top_k_top_p_filtering(
+    logits,
+    top_k: int = 0,
+    top_p: float = 1.0,
+    filter_value: float = -float("Inf"),
+    min_tokens_to_keep: int = 1,
+):
+    """Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
+    Args:
+        logits: logits distribution shape (batch size, vocabulary size)
+        if top_k > 0: keep only top k tokens with highest probability (top-k filtering).
+        if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
+            Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
+    Make sure we keep at least min_tokens_to_keep per batch example in the output
+    From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
+    """
+
+    if top_k > 0:
+        top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1))  # Safety check
+        # Remove all tokens with a probability less than the last token of the top-k
+        indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
+        logits[indices_to_remove] = filter_value
+
+    if top_p < 1.0:
+        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
+        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
+
+        # Remove tokens with cumulative probability above the threshold (token with 0 are kept)
+        sorted_indices_to_remove = cumulative_probs > top_p
+        if min_tokens_to_keep > 1:
+            # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)
+            sorted_indices_to_remove[..., :min_tokens_to_keep] = 0
+        # Shift the indices to the right to keep also the first token above the threshold
+        sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
+        sorted_indices_to_remove[..., 0] = 0
+
+        # scatter sorted tensors to original indexing
+        indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
+        logits[indices_to_remove] = filter_value
+    # import pdb;pdb.set_trace()
+    return logits
+
+
+def sample(logits, temperature: float = 1.0, top_k: int = 0, top_p: float = 1.0, sample_logits=True):
+    logits = logits[:, -1, :] / max(temperature, 1e-5)
+    if top_k > 0 or top_p < 1.0:
+        logits = top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)
+    probs = F.softmax(logits, dim=-1)
+    if sample_logits:
+        idx = torch.multinomial(probs, num_samples=1)
+    else:
+        _, idx = torch.topk(probs, k=1, dim=-1)
+    return idx, probs
+
+
+def split_list(input_list, chunk_size):
+    return [input_list[i:i + chunk_size] for i in range(0, len(input_list), chunk_size)]
+
+
+def get_args_parser():
+    parser = argparse.ArgumentParser('Set transformer detector', add_help=False)
+    parser.add_argument('--unitok_path', type=str, default=r'D:\projects\liquid_app\UniTok\UniTok_weights\unitok_tokenizer\unitok_tokenizer.pth', required=False)
+    parser.add_argument('--mllm_path', type=str, default=r'C:\debug_ckpts\unitok_mllm', required=False)
+    # parser.add_argument('--prompt_file', type=str, required=True)
+    # parser.add_argument('--result_dir', type=str, required=True)
+    parser.add_argument('--idx', type=int, default=0)
+    parser.add_argument('--tau', type=float, default=0.9)
+    parser.add_argument('--topk', type=int, default=2048)
+    parser.add_argument('--topp', type=float, default=0.96)
+    parser.add_argument('--cfg_scale', type=float, default=5.0)
+    return parser
+
+
+def main(args):
+    text_set_id = args.idx
+    tau = args.tau
+    topk = args.topk
+    topp = args.topp
+    cfg_scale = args.cfg_scale
+
+    print('loading vq model ...')
+    ckpt = torch.load(args.unitok_path, map_location='cpu')
+    vae_cfg = Args()
+    vae_cfg.load_state_dict(ckpt['args'])
+    vq_model = UniTok(vae_cfg)
+    vq_model.load_state_dict(ckpt['trainer']['unitok'])
+    vq_model.to('cuda')
+    vq_model.eval()
+
+    tokenizer = AutoTokenizer.from_pretrained(args.mllm_path, padding_side='left')
+    vqllm = MiniGeminiLlamaForCausalLM.from_pretrained(
+        args.mllm_path,
+        attn_implementation='flash_attention_2',
+        torch_dtype=torch.bfloat16
+    ).to('cuda')
+    num_codebooks = vae_cfg.num_codebooks
+    # import pdb;pdb.set_trace()
+    chunk_inputs = [[{'Prompt': 'a dog in grasee'}, {'Prompt': 'a dog in grasee'}, {'Prompt': 'a dog in grasee'}, {'Prompt': 'a dog in grasee'}]]
+    for chunk in tqdm(chunk_inputs):
+        text_inputs = [v['Prompt'] for v in chunk]
+        uncondition_text_inputs = ['<unconditional>\x00'] * len(text_inputs)
+        for i in range(len(text_inputs)):
+            text_inputs[i] = text_inputs[i] + ' Generate an image based on this description.\x00'
+        ori_batchsize = len(text_inputs)
+
+        save_list = []
+        if cfg_scale > 1:
+            model_inputs = tokenizer(text_inputs + uncondition_text_inputs, return_tensors="pt", padding=True).to('cuda')
+        else:
+            model_inputs = tokenizer(text_inputs, return_tensors="pt", padding=True).to('cuda')
+
+        model_kwargs = {'attention_mask': model_inputs.pop('attention_mask'),
+                        'use_cache': True
+                        }
+        input_ids = model_inputs.pop('input_ids')
+        batch_size, cur_len = input_ids.shape
+        if "inputs_embeds" in model_kwargs:
+            cur_len = model_kwargs["inputs_embeds"].shape[1]
+        model_kwargs["cache_position"] = torch.arange(cur_len, device=input_ids.device)
+        # import pdb;pdb.set_trace()
+
+        with torch.no_grad():
+            sampling_kwargs = {'temperature': tau, 'top_k': topk, 'top_p': topp, 'sample_logits': True}
+
+            pred_tokens = []
+            input_multi_ids = None
+            for i in tqdm(range(256)):
+                model_inputs = vqllm.prepare_inputs_for_generation(input_ids, **model_kwargs)  # model_inputs['input_ids'], model_inputs['position_ids'], model_inputs['cache_position']
+                # import pdb;pdb.set_trace()  # model_inputs['position_ids'], model_inputs['cache_position']
+                outputs = vqllm.T2I_forward_withcache(
+                    **model_inputs,
+                    input_multi_ids=input_multi_ids,
+                    return_dict=True,
+                    output_attentions=False,
+                    output_hidden_states=False,
+                )

+                # import pdb;pdb.set_trace()
+                next_embed = outputs['last_hidden_state'][:, -1:, :]
+                # next_token_logits = outputs.logits[:, -1:, :]
+                indices_arhead = []
+
+                # for i_head in range(num_codebooks):
+                #     ar_next_embed = vqllm.ar_head(inputs_embeds=next_embed, use_cache=False, output_attentions=False, output_hidden_states=False, return_dict=False,)
+                #     next_token_logits = vqllm.ar_head.linear_head(ar_next_embed[:, -1:, :])
+                #     # import pdb;pdb.set_trace()
+                #     image_probs = F.softmax(next_token_logits, dim=-1)
+                #     _, image_idx = torch.topk(image_probs, k=1, dim=-1)  # [numcodebook,256,1]
+                #     next_token = image_idx[:, :, 0]
+                #     indices_arhead.append(next_token)
+                #     # import pdb;pdb.set_trace()
+                #     # pred_tokens.append(next_token)
+                #     if i_head < num_codebooks - 1:
+                #         predicted_embed = vqllm.ar_head.codebooks[i_head](next_token)
+                #         next_embed = torch.cat([next_embed, predicted_embed], dim=1)
+
+                for i_head in range(num_codebooks):
+                    ar_next_embed = vqllm.ar_head(
+                        inputs_embeds=next_embed,
+                        use_cache=False,
+                        output_attentions=False,
+                        output_hidden_states=False,
+                        return_dict=False,
+                    )
+                    next_token_logits = vqllm.ar_head.linear_head(ar_next_embed[0])
+                    if cfg_scale > 1:
+                        cond_logits, uncond_logits = torch.split(next_token_logits, len(next_token_logits) // 2, dim=0)
+                        cfg_logits = uncond_logits + (cond_logits - uncond_logits) * cfg_scale
+                        half_next_token, _ = sample(cfg_logits, **sampling_kwargs)
+                        # pred_tokens.append(half_next_token)
+                        next_token = torch.cat([half_next_token, half_next_token])  # [bz,1]
+                    else:
+                        next_token, next_prob = sample(next_token_logits, **sampling_kwargs)
+                        # pred_tokens.append(next_token)
+                    # import pdb;pdb.set_trace()
+                    indices_arhead.append(next_token)
+                    if i_head < num_codebooks - 1:
+                        predicted_embed = vqllm.ar_head.codebooks[i_head](next_token)
+                        next_embed = torch.cat([next_embed, predicted_embed], dim=1)
+
+                # update generated ids, model inputs, and length for next step
+                # import pdb;pdb.set_trace()
+                pred_tokens.append(torch.cat(indices_arhead, dim=1))  # [numcodebook,bz*2]
+                input_multi_ids = torch.stack(pred_tokens, dim=-1)
+                # import pdb;pdb.set_trace()
+                fake_id = torch.zeros_like(input_ids[:, :1])
+                input_ids = torch.cat([input_ids, fake_id], dim=-1)  # add fake id for cache
+
+                model_kwargs = vqllm._update_model_kwargs_for_generation(
+                    outputs,
+                    model_kwargs,
+                    is_encoder_decoder=vqllm.config.is_encoder_decoder,
+                )
+            del sampling_kwargs
+            del model_inputs
+            del outputs
+            del model_kwargs
+            # image_vq_id = input_ids[:,prompt_length:prompt_length+256]-ori_vocabe_size
+            image_vq_id = torch.stack(pred_tokens, dim=-1)[:ori_batchsize]
+            # print(set(image_vq_id.tolist()))
+            save_list.append(image_vq_id)
+
+        torch.cuda.empty_cache()
+
+        print('decoding images ...')
+        image_save_pth = 'visualresults'
+        if not os.path.exists(image_save_pth):
+            os.makedirs(image_save_pth)
+        for datainfo, vq_code in zip(chunk, save_list[0]):
+            new_gen_ids = vq_code.unsqueeze(0).to('cuda')
+            # import pdb;pdb.set_trace()
+            rec_image = vq_model.idx_to_img(new_gen_ids)
+            rec_img = PILtransform(rec_image.squeeze(0).add(1).mul_(0.5).clamp_(0, 1))
+            rec_img.save('{}/{}.jpg'.format(image_save_pth, 'test'))
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser('genai inference script', parents=[get_args_parser()])
+    args = parser.parse_args()
+    main(args)
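
The last step of the decoding loop converts the reconstructed tensor back into a PIL image. A sketch of that conversion, assuming (as the `.add(1).mul_(0.5)` pattern suggests) that `vq_model.idx_to_img` returns values in `[-1, 1]`; random noise stands in for a real decode:

```python
import torch
from torchvision import transforms

PILtransform = transforms.ToPILImage()

rec_image = torch.rand(1, 3, 256, 256) * 2 - 1  # stand-in for vq_model.idx_to_img(new_gen_ids)
rec_img = PILtransform(rec_image.squeeze(0).add(1).mul_(0.5).clamp_(0, 1))
print(rec_img.size)  # (256, 256)
```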