import os
import re
import time
import uuid
import subprocess
import tempfile
import multiprocessing
from threading import Thread
from concurrent.futures import ProcessPoolExecutor

import cv2
import imageio
import numpy as np
import torch
import torchvision.transforms as T
import tqdm
from PIL import Image, ExifTags
from scipy.ndimage import gaussian_filter
from torchvision.transforms.functional import InterpolationMode
from html2image import Html2Image
from moviepy.editor import VideoFileClip, AudioFileClip

import gradio as gr
import spaces
from transformers import AutoModel, AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer, StoppingCriteria
# Environment for the setup commands below. FLASH_ATTENTION_SKIP_CUDA_BUILD lets
# flash-attn install from a prebuilt wheel instead of compiling CUDA kernels;
# the rest of os.environ is preserved so PATH etc. stay intact.
env = {**os.environ, 'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}
subprocess.run('pip install flash-attn --no-build-isolation', env=env, shell=True)
# Install a Chrome/Chromium binary for html2image, plus wkhtmltopdf/xvfb for imgkit.
subprocess.run('snap install chromium', env=env, shell=True)
subprocess.run('wget https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb', env=env, shell=True)
subprocess.run('DEBIAN_FRONTEND=noninteractive dpkg -i google-chrome-stable_current_amd64.deb || apt-get -y -f install', env=env, shell=True)
subprocess.run('apt-get update -y', env=env, shell=True)
subprocess.run('apt-get install -y wkhtmltopdf', env=env, shell=True)
subprocess.run('apt-get install -y xvfb', env=env, shell=True)
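# Quick smoke test: render a static HTML table header with imgkit to verify
# that the wkhtmltopdf-based HTML-to-image pipeline works before the app starts.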
import imgkit
imgkit.from_string("""
<html>
<head>
<meta charset="utf-8">
</head>
<body>
<table border="1" cellspacing="0" cellpadding="8" style="font-family:'Noto Sans'; font-size:12px; border-collapse: collapse;">
<tr>
<th colspan="4" style="text-align:center; font-size:14px; padding:10px;">
Top hidden tokens per layer for the Prediction
</th>
</tr>
<tr>
<th>Layer ⬆️</th>
<th>Top 1</th>
<th>Top 2</th>
<th>Top 3</th>
</tr>
</table>
</body>
</html>
""", 'out.jpg')
print(Image.open('out.jpg'))
torch.set_default_device('cuda')
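# Standard ImageNet per-channel normalization statistics; the vision encoder's
# preprocessing expects inputs normalized with these mean/std values.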
IMAGENET_MEAN = (0.485, 0.456, 0.406)
IMAGENET_STD = (0.229, 0.224, 0.225)
def build_transform(input_size):
MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
transform = T.Compose([
T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
T.ToTensor(),
T.Normalize(mean=MEAN, std=STD)
])
return transform
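# Pick the tiling grid (columns x rows) whose aspect ratio is closest to the
# input image's. Ties are broken in favor of grids covering more area, so
# larger images get more tiles.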
def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
best_ratio_diff = float('inf')
best_ratio = (1, 1)
area = width * height
for ratio in target_ratios:
target_aspect_ratio = ratio[0] / ratio[1]
ratio_diff = abs(aspect_ratio - target_aspect_ratio)
if ratio_diff < best_ratio_diff:
best_ratio_diff = ratio_diff
best_ratio = ratio
elif ratio_diff == best_ratio_diff:
if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
best_ratio = ratio
return best_ratio
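# InternVL-style dynamic tiling: resize the image to the chosen grid of
# image_size x image_size tiles, crop out each tile, and optionally append a
# square thumbnail of the whole image as a global view.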
def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
orig_width, orig_height = image.size
aspect_ratio = orig_width / orig_height
# calculate the existing image aspect ratio
target_ratios = set(
(i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
i * j <= max_num and i * j >= min_num)
target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])
# find the closest aspect ratio to the target
target_aspect_ratio = find_closest_aspect_ratio(
aspect_ratio, target_ratios, orig_width, orig_height, image_size)
# calculate the target width and height
target_width = image_size * target_aspect_ratio[0]
target_height = image_size * target_aspect_ratio[1]
blocks = target_aspect_ratio[0] * target_aspect_ratio[1]
# resize the image
resized_img = image.resize((target_width, target_height))
processed_images = []
for i in range(blocks):
box = (
(i % (target_width // image_size)) * image_size,
(i // (target_width // image_size)) * image_size,
((i % (target_width // image_size)) + 1) * image_size,
((i // (target_width // image_size)) + 1) * image_size
)
# split the image
split_img = resized_img.crop(box)
processed_images.append(split_img)
assert len(processed_images) == blocks
if use_thumbnail and len(processed_images) != 1:
thumbnail_img = image.resize((image_size, image_size))
processed_images.append(thumbnail_img)
return processed_images, target_aspect_ratio
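# Photos from phones and cameras often store rotation in EXIF metadata rather
# than in the pixel data; rotate the pixels so downstream tiling sees the image upright.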
def correct_image_orientation(image_path):
    # Open the image
    image = Image.open(image_path)
    # Check EXIF metadata (if present) for the Orientation tag
    try:
        exif = image._getexif()
        if exif is not None:
            for tag, value in exif.items():
                if ExifTags.TAGS.get(tag) == "Orientation":
                    # Rotate based on the EXIF Orientation value
                    if value == 3:
                        image = image.rotate(180, expand=True)
                    elif value == 6:
                        image = image.rotate(-90, expand=True)
                    elif value == 8:
                        image = image.rotate(90, expand=True)
                    break
    except Exception as e:
        print("Could not process EXIF data:", e)
    return image
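# Full preprocessing pipeline for one image: EXIF-correct, dynamically tile,
# and stack the normalized tile tensors into a single (num_tiles, 3, H, W)
# batch; optionally also return the chosen tiling grid.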
def load_image(image_file, input_size=448, max_num=12, return_aspect_ratio=False):
    image = correct_image_orientation(image_file).convert('RGB')
    transform = build_transform(input_size=input_size)
    images, target_aspect_ratio = dynamic_preprocess(image, image_size=input_size, use_thumbnail=True, max_num=max_num)
    pixel_values = [transform(image) for image in images]
    pixel_values = torch.stack(pixel_values)
    if return_aspect_ratio:
        return pixel_values, target_aspect_ratio
    else:
        return pixel_values
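# The reshape below assumes each 448px tile contributes a 16x16 grid of visual
# tokens (256 tokens per tile), so the flat attention slice over image tokens
# can be folded back into a (rows*16, cols*16) spatial map.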
def visualize_attention_hiddenstate(attention_tensor, head=None, start_img_token_index=0, end_img_token_index=0, target_aspect_ratio=(0, 0)):
    """Build heatmaps from attention scores averaged over the last 8 layers and return the indices of the 5 most-attended image tokens."""
    last_8_layers = attention_tensor[-8:]  # take the last 8 layers
    averaged_layer = np.mean(last_8_layers, axis=0)  # average over those layers
    if head is None:
        averaged_attention = averaged_layer.mean(axis=1).squeeze()  # average over attention heads
    else:
        averaged_attention = averaged_layer[:, head, :, :].squeeze()  # select a specific head
    heat_maps = []
    top_5_tokens = []
    for i in range(len(averaged_attention)):  # iterate over beams
        h_target_aspect_ratio = target_aspect_ratio[1] if target_aspect_ratio[1] != 0 else 1
        w_target_aspect_ratio = target_aspect_ratio[0] if target_aspect_ratio[0] != 0 else 1
        img_atten_score = averaged_attention[i].reshape(-1)[start_img_token_index:end_img_token_index]
        # Indices of the 5 tokens with the highest attention, sorted in descending order
        top_5_indices = np.argsort(img_atten_score)[-5:][::-1]
        top_5_values = img_atten_score[top_5_indices]
        # top_5_tokens.append(list(zip(top_5_indices + start_img_token_index, top_5_values)))
        top_5_tokens.append(list(top_5_indices + start_img_token_index))
        # Reshape the flat attention vector back into the tile grid to draw the heatmap
        img_atten_score = img_atten_score.reshape(h_target_aspect_ratio, w_target_aspect_ratio, 16, 16)
        img_atten_score = np.transpose(img_atten_score, (0, 2, 1, 3)).reshape(h_target_aspect_ratio * 16, w_target_aspect_ratio * 16)
        img_atten_score = np.power(img_atten_score, 0.9)  # gamma < 1 boosts low-attention regions for visibility
        heat_maps.append(img_atten_score)
    return heat_maps, top_5_tokens
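# "Logit lens"-style inspection: project each layer's hidden state at the
# current position through the final norm and lm_head, softmax over the
# vocabulary, and keep the top-3 candidate tokens per layer. This shows how
# the model's next-token prediction forms layer by layer.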
def generate_next_token_table_image(model, tokenizer, response, index_focus):
next_token_table = []
for layer_index in range(len(response.hidden_states[index_focus])):
h_out = model.language_model.lm_head(
model.language_model.model.norm(response.hidden_states[index_focus][layer_index][0])
)
h_out = torch.softmax(h_out, -1)
top_tokens = []
for token_index in h_out.argsort(descending=True)[0, :3]: # Top 3
token_str = tokenizer.decode(token_index)
prob = float(h_out[0, int(token_index)])
top_tokens.append((token_str, prob))
next_token_table.append((layer_index, top_tokens))
next_token_table = next_token_table[::-1]
html_rows = ""
last_layer_index = len(next_token_table) - 1
for i, (layer_index, tokens) in enumerate(next_token_table):
row = f"<tr><td style='font-weight: bold'>Layer {layer_index}</td>"
        # First column (Top 1): bold the top token; red for the final (deepest)
        # layer's row, blue for all earlier layers
        token_str, prob = tokens[0]
        if layer_index == last_layer_index:
            row += f"<td><span style='color: red; font-weight: bold'>{token_str}</span> ({prob:.2%})</td>"
        else:
            row += f"<td><span style='color: blue; font-weight: bold'>{token_str}</span> ({prob:.2%})</td>"
        # Remaining columns use normal formatting
for token_str, prob in tokens[1:]:
row += f"<td>{token_str} ({prob:.2%})</td>"
row += "</tr>"
html_rows += row
html_code = f'''
<html>
<head>
<meta charset="utf-8">
<style>
table {{
font-family: 'Noto Sans';
font-size: 12px;
border-collapse: collapse;
table-layout: fixed;
width: 100%;
}}
th, td {{
border: 1px solid black;
padding: 8px;
width: 150px;
height: 30px;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
text-align: center;
}}
th.layer {{
width: 100px;
}}
th.title {{
font-size: 14px;
padding: 10px;
height: auto;
white-space: normal;
overflow: visible;
}}
</style>
</head>
<body style="background-color: white;">
<table>
<tr>
<th colspan="4" class="title">
Top hidden tokens per layer for the Prediction
</th>
</tr>
<tr>
<th class="layer">Layer ⬆️</th>
<th>Top 1</th>
<th>Top 2</th>
<th>Top 3</th>
</tr>
{html_rows}
</table>
</body>
</html>
'''
    with tempfile.TemporaryDirectory() as tmpdir:
        hti = Html2Image(output_path=tmpdir)
        hti.browser_flags = [
            "--headless=new",                 # use the new headless mode
            "--disable-gpu",                  # disable GPU rendering
            "--disable-software-rasterizer",  # avoid the software-GPU fallback
            "--no-sandbox",                   # avoid sandbox errors in multi-threaded environments
        ]
        filename = str(uuid.uuid4()) + ".png"
        # filename = 'next_token_table.png'
        hti.screenshot(html_str=html_code, save_as=filename, size=(500, 1000))
        img_path = os.path.join(tmpdir, filename)
        img_cv2 = cv2.imread(img_path)[:, :, ::-1]  # BGR -> RGB
        os.remove(img_path)
    return img_cv2
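# Match the heatmap overlay to the text panel's height so the three panels can
# be horizontally stacked into one video frame without distortion.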
def adjust_overlay(overlay, text_img):
    h_o, w_o = overlay.shape[:2]
    h_t, w_t = text_img.shape[:2]
    if h_o > w_o:  # portrait overlay
        # Resize so the height matches the text image, preserving aspect ratio
        new_h = h_t
        new_w = int(w_o * (new_h / h_o))
        overlay_resized = cv2.resize(overlay, (new_w, new_h))
    else:  # landscape overlay
        # Keep the overlay size, but pad with white if it is shorter than the text image
        overlay_resized = overlay.copy()
        if overlay_resized.shape[0] < h_t:
            pad_h = h_t - overlay_resized.shape[0]
            padding = np.ones((pad_h, overlay_resized.shape[1], 3), dtype=np.uint8) * 255
            overlay_resized = np.vstack((overlay_resized, padding))  # pad at the bottom
    # Ensure the overlay ends up with the same height as text_img
    if overlay_resized.shape[0] != h_t:
        overlay_resized = cv2.resize(overlay_resized, (overlay_resized.shape[1], h_t))
    return overlay_resized
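# Render the generated text so far as an image: previous context in black, the
# current input token in blue, and the newly predicted token in red.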
def generate_text_image_with_html2image(old_text, input_token, new_token, image_width=400, min_height=1000, font_size=16):
    full_text = old_text + f"<span style='color:blue; font-weight:bold'>[{input_token}]</span>" + "→" + f"<span style='color:red; font-weight:bold'>[{new_token}]</span>"
    # Replace \n with <br> so line breaks render in HTML
    full_text = full_text.replace('\n', '<br>')
html_code = f'''
<html>
<head>
<meta charset="utf-8">
</head>
<body style="font-family: 'DejaVu Sans', sans-serif; font-size: {font_size}px; width: {image_width}px; min-height: {min_height}px; padding: 10px; background-color: white; line-height: 1.4;">
{full_text}
</body>
</html>
'''
    save_path = str(uuid.uuid4()) + ".png"
    hti = Html2Image(output_path='.')
    hti.browser_flags = [
        "--headless=new",                 # use the new headless mode
        "--disable-gpu",                  # disable GPU rendering
        "--disable-software-rasterizer",  # avoid the software-GPU fallback
        "--no-sandbox",                   # avoid sandbox errors in multi-threaded environments
    ]
    hti.screenshot(html_str=html_code, save_as=save_path, size=(image_width, min_height))
    text_img = cv2.imread(save_path)
    text_img = cv2.cvtColor(text_img, cv2.COLOR_BGR2RGB)
    os.remove(save_path)
return text_img
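# Same logit-lens computation as generate_next_token_table_image, but split
# into a data-extraction step (model-bound, here) and a rendering step (below)
# so the browser-based rendering could be parallelized separately.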
def extract_next_token_table_data(model, tokenizer, response, index_focus):
next_token_table = []
for layer_index in range(len(response.hidden_states[index_focus])):
h_out = model.language_model.lm_head(
model.language_model.model.norm(response.hidden_states[index_focus][layer_index][0])
)
h_out = torch.softmax(h_out, -1)
top_tokens = []
for token_index in h_out.argsort(descending=True)[0, :3]: # Top 3
token_str = tokenizer.decode(token_index)
prob = float(h_out[0, int(token_index)])
top_tokens.append((token_str, prob))
next_token_table.append((layer_index, top_tokens))
next_token_table = next_token_table[::-1]
return next_token_table
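# Render the per-layer top-token table to an image with a headless browser.
# Rows whose top-1 token matches the final prediction are highlighted in red,
# all others in blue.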
def render_next_token_table_image(table_data, predict_token):
    html_rows = ""
    for layer_index, tokens in table_data:
        row = f"<tr><td style='font-weight: bold'>Layer {layer_index + 1}</td>"
token_str, prob = tokens[0]
if token_str == predict_token:
style = "color: red; font-weight: bold"
else:
style = "color: blue; font-weight: bold"
row += f"<td><span style='{style}'>{token_str}</span> ({prob:.2%})</td>"
for token_str, prob in tokens[1:]:
row += f"<td>{token_str} ({prob:.2%})</td>"
row += "</tr>"
html_rows += row
html_code = f'''
<html>
<head>
<meta charset="utf-8">
<style>
table {{
font-family: 'Noto Sans';
font-size: 12px;
border-collapse: collapse;
table-layout: fixed;
width: 100%;
}}
th, td {{
border: 1px solid black;
padding: 8px;
width: 150px;
height: 30px;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
text-align: center;
}}
th.layer {{
width: 100px;
}}
th.title {{
font-size: 14px;
padding: 10px;
height: auto;
white-space: normal;
overflow: visible;
}}
</style>
</head>
<body style="background-color: white;">
<table>
<tr>
<th colspan="4" class="title">
Hidden states per Transformer layer (LLM) for Prediction
</th>
</tr>
<tr>
<th class="layer">Layer ⬆️</th>
<th>Top 1</th>
<th>Top 2</th>
<th>Top 3</th>
</tr>
{html_rows}
</table>
</body>
</html>
'''
with tempfile.TemporaryDirectory() as tmpdir:
hti = Html2Image(output_path=tmpdir)
hti.browser_flags = [
"--headless=new",
"--disable-gpu",
"--disable-software-rasterizer",
"--no-sandbox",
]
filename = str(uuid.uuid4()) + ".png"
hti.screenshot(html_str=html_code, save_as=filename, size=(500, 1000))
img_path = os.path.join(tmpdir, filename)
img_cv2 = cv2.imread(img_path)[:, :, ::-1]
os.remove(img_path)
return img_cv2
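# Load the Vintern-1B explainable-AI checkpoint. use_flash_attn=False keeps the
# eager attention path, which is needed here because flash attention does not
# materialize the attention weight matrices this demo visualizes.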
model = AutoModel.from_pretrained(
"khang119966/Vintern-1B-v3_5-explainableAI",
torch_dtype=torch.bfloat16,
low_cpu_mem_usage=True,
trust_remote_code=True,
use_flash_attn=False,
).eval().cuda()
tokenizer = AutoTokenizer.from_pretrained("khang119966/Vintern-1B-v3_5-explainableAI", trust_remote_code=True, use_fast=False)
# Wrappers with fixed rendering kwargs, so each can be mapped over a list of
# per-token parameter tuples (originally intended for multiprocessing workers)
def generate_text_img_wrapper(args):
    return generate_text_image_with_html2image(*args, image_width=500, min_height=1000)

def generate_hidden_img_wrapper(args):
    return render_next_token_table_image(*args)
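# Main pipeline: run generation once with attention capture, then for every
# generated token build (1) an attention heatmap over the image, (2) a rendered
# text panel, and (3) a logit-lens table, and stitch the frames into an MP4.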
@spaces.GPU(duration=120)
def generate_video(image, prompt, max_tokens):
    print(image)
    pixel_values, target_aspect_ratio = load_image(image, max_num=6, return_aspect_ratio=True)
    pixel_values = pixel_values.to(torch.bfloat16).cuda()
    generation_config = dict(max_new_tokens=int(max_tokens), do_sample=False, num_beams=3, repetition_penalty=2.5)
    response, query = model.chat(tokenizer, pixel_values, '<image>\n' + prompt, generation_config, return_history=False,
                                 attention_visualize=True, last_visualize_layers=7, raw_image_path=image, target_aspect_ratio=target_aspect_ratio)
generation_output = response
raw_image_path = image
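    # Detach each token's per-layer attention tensors and move them to CPU as
    # float32 numpy arrays before building the visualizations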
attentions_tensors = []
for tok_ in generation_output["attentions"]:
attentions_tensors.append([])
for lay_ in tok_ :
attentions_tensors[-1].append(lay_.detach().cpu().type(torch.float).numpy())
attention_scores = attentions_tensors
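    # Locate the span of visual tokens between the <img> and </img> markers in
    # the tokenized prompt. The "- 256" appears to exclude the final 256-token
    # thumbnail tile added by dynamic_preprocess; if that leaves an empty span
    # (single-tile image), fall back to the full <img>...</img> range.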
query_ = tokenizer(query)
start_img_token_index = int(np.where(np.array(query_["input_ids"])==tokenizer("<img>")["input_ids"][0])[0]+1)
end_img_token_index = int(np.where(np.array(query_["input_ids"])==tokenizer("</img>")["input_ids"][0])[0]-256)
if end_img_token_index - start_img_token_index == 0 :
end_img_token_index = int(np.where(np.array(query_["input_ids"])==tokenizer("</img>")["input_ids"][0])[0])
    # Read the original image (OpenCV loads BGR; convert to RGB)
    image = cv2.imread(raw_image_path)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    scale_factor = 1.  # frame scale factor (1.0 = no downscaling; currently unused)
    alpha = 0.4  # heatmap blending weight
    # Collected video frames
    visualization_frames = []
    # Text generated so far
    generated_text = ""
    frame_step = 1
    input_token = ""
    params_for_text = []
    params_for_hidden = []
    heatmap_imgs = []
    top_visual_tokens_focus_tables = []
    # Iterate over each generated token
for index_focus in tqdm.tqdm(range(0, generation_output.sequences.shape[1], frame_step)):
predict_token_text = tokenizer.decode(generation_output.sequences[0, index_focus])
generated_text += predict_token_text # Ghép chữ lại
        # Build the averaged attention heatmap for this token
        heat_maps, top_visual_tokens_focus = visualize_attention_hiddenstate(
            attention_scores[index_focus], head=None,
            start_img_token_index=start_img_token_index,
            end_img_token_index=end_img_token_index,
            target_aspect_ratio=target_aspect_ratio)
        heatmap = np.array(heat_maps[0])
        # Resize the heatmap to the original image size
        heatmap = cv2.resize(heatmap, (image.shape[1], image.shape[0]), interpolation=cv2.INTER_CUBIC)
        # Smooth the heatmap
        heatmap_smooth = gaussian_filter(heatmap, sigma=1)
        # Normalize the heatmap to 0-255 and apply a JET colormap
        heatmap_norm = cv2.normalize(heatmap_smooth, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
        heatmap_color = cv2.applyColorMap(heatmap_norm, cv2.COLORMAP_JET)
        heatmap_color = cv2.cvtColor(heatmap_color, cv2.COLOR_BGR2RGB)
        # Blend the heatmap onto the original image
        overlay = cv2.addWeighted(image, 1 - alpha, heatmap_color, alpha, 0)
        prev_text = generated_text[:-len(input_token) - len(predict_token_text)] + " "
        params_for_text.append((prev_text, input_token, predict_token_text))
        hidden_table = extract_next_token_table_data(model, tokenizer, generation_output, index_focus)
        params_for_hidden.append((hidden_table, predict_token_text))
        input_token = predict_token_text
        heatmap_imgs.append(overlay)
    # Multiprocessing variants (kept for reference; rendering currently runs sequentially):
    # with multiprocessing.Pool(processes=20) as pool:
    # with ProcessPoolExecutor(max_workers=20) as pool:
    # ctx = multiprocessing.get_context()
    # ctx.Process(target=lambda: None).daemon = False
    # with ctx.Pool(processes=20) as pool:
    #     text_imgs = pool.map(generate_text_img_wrapper, params_for_text)
    #     hidden_imgs = pool.map(generate_hidden_img_wrapper, params_for_hidden)
text_imgs = []
for param in tqdm.tqdm(params_for_text):
result = generate_text_img_wrapper(param)
text_imgs.append(result)
hidden_imgs = []
for param in tqdm.tqdm(params_for_hidden):
result = generate_hidden_img_wrapper(param)
hidden_imgs.append(result)
for i in range(len(text_imgs)):
overlay = heatmap_imgs[i]
text_img = text_imgs[i]
predict_hidden_states = hidden_imgs[i]
overlay_adjusted = adjust_overlay(overlay, text_img)
predict_hidden_states = adjust_overlay(predict_hidden_states, text_img)
combined_image = np.hstack((overlay_adjusted, text_img, predict_hidden_states))
visualization_frames.append(combined_image)
    resized_visualization_frames = []
    for frame in visualization_frames:
        frame = cv2.resize(frame, (visualization_frames[0].shape[1], visualization_frames[0].shape[0]))
        resized_visualization_frames.append(frame)
    # Save as an MP4 video with imageio (frames are RGB)
    imageio.mimsave(
        'heatmap_animation.mp4',
        resized_visualization_frames,
        fps=5
    )
return "heatmap_animation.mp4"
with gr.Blocks() as demo:
gr.Markdown("""# 🎥 Visualizing How Multimodal Models Think
- This tool generates a video to **visualize how a multimodal model (image + text)** attends to different parts of an image while generating text.
📌 What it does: - Takes an input image and a text prompt. - Shows how the model’s attention shifts on the image for each generated token. - Helps explain the model’s behavior and decision-making.
🖼️ Video layout (per frame): Each frame in the video includes: 1. 🔥 **Heatmap over image**: Shows which area the model focuses on. 2. 📝 **Generated text**: With old context, current token highlighted. 3. 📊 **Token prediction table**: Shows the model’s top next-token guesses.
🎯 Use cases: Research explainability of vision-language models. - Debugging or interpreting model outputs. - Creating educational visualizations.
""")
with gr.Row():
with gr.Column():
            image = gr.Image(label="Upload your image", type='filepath')
            prompt = gr.Textbox(label="Prompt", value="List all the text.")
            max_tokens = gr.Slider(label="Max token output (⚠️ Choose <100 for faster response)", minimum=1, maximum=256, value=50)
btn = gr.Button("Inference")
video = gr.Video(label="Visualization Video")
btn.click(fn=generate_video, inputs=[image, prompt, max_tokens], outputs=video)
if __name__ == "__main__":
demo.launch()