import gradio as gr
import spaces

import os
import re
import subprocess
import tempfile
import time
import uuid
from threading import Thread

import cv2
import numpy as np
import torch
import torchvision.transforms as T
from torchvision.transforms.functional import InterpolationMode
from PIL import Image, ExifTags
from scipy.ndimage import gaussian_filter
from html2image import Html2Image
from moviepy.editor import VideoFileClip, AudioFileClip
from transformers import AutoModel, AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer, StoppingCriteria
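# Install flash-attn at startup without compiling its CUDA kernels (FLASH_ATTENTION_SKIP_CUDA_BUILD=TRUE),
# a common pattern on Hugging Face Spaces, then default all new tensors to the GPU.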
subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
torch.set_default_device('cuda')
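# Standard ImageNet channel-wise mean/std, used below to normalize every image tile.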
IMAGENET_MEAN = (0.485, 0.456, 0.406)
IMAGENET_STD = (0.229, 0.224, 0.225)

def build_transform(input_size):
    MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
    transform = T.Compose([
        T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
        T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
        T.ToTensor(),
        T.Normalize(mean=MEAN, std=STD)
    ])
    return transform
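# Illustrative usage (a sketch, not executed here): build_transform returns a torchvision
# pipeline that maps a PIL image to a normalized 3x448x448 tensor. "example.jpg" is a
# hypothetical path:
#   transform = build_transform(input_size=448)
#   tensor = transform(Image.open("example.jpg"))
#   assert tensor.shape == (3, 448, 448)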
def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
    best_ratio_diff = float('inf')
    best_ratio = (1, 1)
    area = width * height
    for ratio in target_ratios:
        target_aspect_ratio = ratio[0] / ratio[1]
        ratio_diff = abs(aspect_ratio - target_aspect_ratio)
        if ratio_diff < best_ratio_diff:
            best_ratio_diff = ratio_diff
            best_ratio = ratio
        elif ratio_diff == best_ratio_diff:
            # On a tie, prefer the larger grid only if the original image is big enough
            # to fill more than half of that grid's pixel area.
            if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
                best_ratio = ratio
    return best_ratio
def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
    orig_width, orig_height = image.size
    aspect_ratio = orig_width / orig_height

    # enumerate candidate (cols, rows) grids whose tile count lies within [min_num, max_num]
    target_ratios = set(
        (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
        i * j <= max_num and i * j >= min_num)
    target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])

    # find the grid whose aspect ratio is closest to the input image's
    target_aspect_ratio = find_closest_aspect_ratio(
        aspect_ratio, target_ratios, orig_width, orig_height, image_size)

    # calculate the target width and height
    target_width = image_size * target_aspect_ratio[0]
    target_height = image_size * target_aspect_ratio[1]
    blocks = target_aspect_ratio[0] * target_aspect_ratio[1]

    # resize the image, then split it into image_size x image_size tiles
    resized_img = image.resize((target_width, target_height))
    processed_images = []
    for i in range(blocks):
        box = (
            (i % (target_width // image_size)) * image_size,
            (i // (target_width // image_size)) * image_size,
            ((i % (target_width // image_size)) + 1) * image_size,
            ((i // (target_width // image_size)) + 1) * image_size
        )
        split_img = resized_img.crop(box)
        processed_images.append(split_img)
    assert len(processed_images) == blocks
    if use_thumbnail and len(processed_images) != 1:
        # append a downscaled copy of the whole image as a global-context thumbnail
        thumbnail_img = image.resize((image_size, image_size))
        processed_images.append(thumbnail_img)
    return processed_images, target_aspect_ratio
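# Worked example (a sketch of the tiling behaviour, not executed here): a hypothetical 896x448
# image has aspect ratio 2.0, so the closest grid is (2, 1); the image is resized to 896x448 and
# split into two 448x448 tiles, and with use_thumbnail=True a third 448x448 thumbnail of the full
# image is appended, giving 3 tiles and target_aspect_ratio == (2, 1).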
def correct_image_orientation(image_path):
    # Open the image
    image = Image.open(image_path)

    # Check the EXIF data (if any)
    try:
        exif = image._getexif()
        if exif is not None:
            for tag, value in exif.items():
                if ExifTags.TAGS.get(tag) == "Orientation":
                    # Fix the orientation based on the Orientation tag value
                    if value == 3:
                        image = image.rotate(180, expand=True)
                    elif value == 6:
                        image = image.rotate(-90, expand=True)
                    elif value == 8:
                        image = image.rotate(90, expand=True)
                    break
    except Exception as e:
        print("Could not process EXIF data:", e)

    return image
def load_image(image_file, input_size=448, max_num=12, return_aspect_ratio=False):
    image = correct_image_orientation(image_file).convert('RGB')
    transform = build_transform(input_size=input_size)
    images, target_aspect_ratio = dynamic_preprocess(image, image_size=input_size, use_thumbnail=True, max_num=max_num)
    pixel_values = torch.stack([transform(tile) for tile in images])
    if return_aspect_ratio:
        return pixel_values, target_aspect_ratio
    return pixel_values
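# Illustrative usage (a sketch, not executed here): "photo.jpg" is a hypothetical path.
#   pixel_values, grid = load_image("photo.jpg", max_num=6, return_aspect_ratio=True)
#   pixel_values = pixel_values.to(torch.bfloat16).cuda()   # shape: (num_tiles, 3, 448, 448)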
model = AutoModel.from_pretrained(
    "khang119966/Vintern-1B-v3_5-explainableAI",
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,
    trust_remote_code=True,
).eval().cuda()
tokenizer = AutoTokenizer.from_pretrained("khang119966/Vintern-1B-v3_5-explainableAI", trust_remote_code=True, use_fast=False)
@spaces.GPU
def generate_video(image, prompt, max_tokens):
    print(image)
    pixel_values, target_aspect_ratio = load_image(image, max_num=6, return_aspect_ratio=True)
    pixel_values = pixel_values.to(torch.bfloat16).cuda()
    generation_config = dict(max_new_tokens=int(max_tokens), do_sample=False, num_beams=3, repetition_penalty=2.5)
    response, query = model.chat(tokenizer, pixel_values, '<image>\n' + prompt, generation_config, return_history=False,
                                 attention_visualize=True, last_visualize_layers=7, raw_image_path=image,
                                 target_aspect_ratio=target_aspect_ratio)
    print(response)
    return "path_to_generated_video.mp4"
with gr.Blocks() as demo:
    gr.Markdown("### Simple VLM Demo")
    with gr.Row():
        with gr.Column():
            image = gr.Image(label="Upload your image", type="filepath")
            prompt = gr.Textbox(label="Describe your prompt", value="List all the text.")
            max_tokens = gr.Slider(label="Max token output (⚠️ Choose <100 for faster response)", minimum=1, maximum=512, value=50)
            btn = gr.Button("Attention Video")
        video = gr.Video(label="Attention Video")
    btn.click(fn=generate_video, inputs=[image, prompt, max_tokens], outputs=video)

if __name__ == "__main__":
    demo.launch()