developer0hye committed
Commit bade21f · verified · 1 Parent(s): 38cd071

Create app.py

Files changed (1)
1. app.py +179 -0
app.py ADDED
@@ -0,0 +1,179 @@
+ import gradio as gr
+ import spaces
+ import torch
+ import math
+ import numpy as np
+ import os
+ from PIL import Image
+ import torchvision.transforms as T
+ from torchvision.transforms.functional import InterpolationMode
+ from transformers import AutoModel, AutoTokenizer, AutoConfig
+
+ # =============================================================================
+ # InternVL-3 preprocessing utilities (image-only version)
+ # =============================================================================
+ IMAGENET_MEAN = (0.485, 0.456, 0.406)
+ IMAGENET_STD = (0.229, 0.224, 0.225)
+
+
+ def build_transform(input_size: int = 448):
+     """Return a torchvision transform matching InternVL pre-training."""
+     return T.Compose(
+         [
+             T.Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
+             T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
+             T.ToTensor(),
+             T.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
+         ]
+     )
+
+
+ def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
+     """Pick the tiling grid whose aspect ratio best matches the input image."""
+     best_ratio_diff = float("inf")
+     best_ratio = (1, 1)
+     area = width * height
+     for ratio in target_ratios:
+         tgt_ar = ratio[0] / ratio[1]
+         diff = abs(aspect_ratio - tgt_ar)
+         # On ties, prefer the grid with more tiles if the image is large enough.
+         if diff < best_ratio_diff or (diff == best_ratio_diff and area > 0.5 * image_size * image_size * ratio[0] * ratio[1]):
+             best_ratio_diff = diff
+             best_ratio = ratio
+     return best_ratio
+
+
+ def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
+     """Split an arbitrarily sized image into at most `max_num` tiles of 448x448 (InternVL spec)."""
+     ow, oh = image.size
+     aspect_ratio = ow / oh
+     target_ratios = sorted(
+         {(i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if min_num <= i * j <= max_num},
+         key=lambda x: x[0] * x[1],
+     )
+     ratio = find_closest_aspect_ratio(aspect_ratio, target_ratios, ow, oh, image_size)
+     tw, th = image_size * ratio[0], image_size * ratio[1]
+     blocks = ratio[0] * ratio[1]
+     resized = image.resize((tw, th))
+     tiles = [
+         resized.crop(
+             (
+                 (idx % (tw // image_size)) * image_size,
+                 (idx // (tw // image_size)) * image_size,
+                 ((idx % (tw // image_size)) + 1) * image_size,
+                 ((idx // (tw // image_size)) + 1) * image_size,
+             )
+         )
+         for idx in range(blocks)
+     ]
+     if use_thumbnail and blocks != 1:
+         tiles.append(image.resize((image_size, image_size)))
+     return tiles
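+ # Worked example for dynamic_preprocess (illustrative numbers, not from the
+ # original commit): a 1344x896 input has aspect ratio 1.5, whose closest
+ # candidate grid is (3, 2); the image is resized to 1344x896 and cut into
+ # 3 * 2 = 6 tiles of 448x448, and with use_thumbnail=True a 7th 448x448
+ # thumbnail of the whole image is appended.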
+
+
+ def load_image(path: str, input_size: int = 448, max_num: int = 12):
+     """Return a tensor of shape (N, 3, H, W) ready for InternVL."""
+     img = Image.open(path).convert("RGB")
+     transform = build_transform(input_size)
+     tiles = dynamic_preprocess(img, image_size=input_size, use_thumbnail=True, max_num=max_num)
+     return torch.stack([transform(t) for t in tiles])
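+ # For instance (hypothetical file name), load_image("photo.jpg") on a 3:2
+ # photo yields a (7, 3, 448, 448) tensor: 6 tiles plus 1 thumbnail. The
+ # leading dimension varies with the input's aspect ratio.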
+
+
+ # =============================================================================
+ # InternVL-3-14B model loading (multi-GPU aware)
+ # =============================================================================
+ MODEL_ID = "OpenGVLab/InternVL3-14B"
+
+
+ def split_model(model_name: str):
+     """Distribute LLM layers across GPUs, keeping the vision encoder on GPU 0."""
+     n_gpu = torch.cuda.device_count()
+     if n_gpu < 2:
+         return "auto"  # let transformers decide
+
+     cfg = AutoConfig.from_pretrained(model_name, trust_remote_code=True)
+     n_layers = cfg.llm_config.num_hidden_layers  # type: ignore[attr-defined]
+
+     # GPU 0 also runs the vision encoder, so treat it as half a GPU when
+     # dividing up the text layers.
+     per_gpu = math.ceil(n_layers / (n_gpu - 0.5))
+     alloc = [per_gpu] * n_gpu
+     alloc[0] = math.ceil(alloc[0] * 0.5)
+
+     dmap = {
+         "vision_model": 0,
+         "mlp1": 0,
+         "language_model.model.tok_embeddings": 0,
+         "language_model.model.embed_tokens": 0,
+         "language_model.output": 0,
+         "language_model.model.norm": 0,
+         "language_model.model.rotary_emb": 0,
+         "language_model.lm_head": 0,
+     }
+     layer_idx = 0
+     for gpu, n in enumerate(alloc):
+         for _ in range(n):
+             if layer_idx >= n_layers:
+                 break
+             # The final layer is pinned to GPU 0 so it sits next to the norm
+             # and output head registered above.
+             dmap[f"language_model.model.layers.{layer_idx}"] = 0 if layer_idx == n_layers - 1 else gpu
+             layer_idx += 1
+     return dmap
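+ # Illustrative allocation (a hypothetical 2-GPU machine; assuming 48 LLM
+ # layers): per_gpu = ceil(48 / 1.5) = 32 and alloc = [16, 32], so layers
+ # 0-15 land on GPU 0 and layers 16-47 on GPU 1, except the last layer,
+ # which is pinned back to GPU 0 as noted above.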
+
+
+ device_map = split_model(MODEL_ID)
+
+ model = AutoModel.from_pretrained(
+     MODEL_ID,
+     torch_dtype=torch.bfloat16,
+     low_cpu_mem_usage=True,
+     use_flash_attn=True,
+     trust_remote_code=True,
+     device_map=device_map,
+ ).eval()
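+ # Rough sizing note (back-of-the-envelope, not measured): 14B parameters at
+ # bfloat16 take about 2 bytes each, i.e. roughly 28 GB of weights, which is
+ # why the layers are spread across GPUs above.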
+
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True, use_fast=False)
+
+
+ # =============================================================================
+ # Inference function (image-only)
+ # =============================================================================
+ @spaces.GPU
+ def internvl_inference(image_path: str | None, text_input: str | None = None):
+     if image_path is None:
+         return "Please upload an image first."
+     pixel_values = load_image(image_path, max_num=12).to(torch.bfloat16).cuda()
+     prompt = f"<image>\n{text_input}" if text_input else "<image>\n"
+     gen_cfg = dict(max_new_tokens=1024, do_sample=True)
+     return model.chat(tokenizer, pixel_values, prompt, gen_cfg)
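+ # model.chat() is provided by the InternVL remote code pulled in via
+ # trust_remote_code; it returns the generated answer as a string (passing
+ # return_history=True would also return the running conversation history).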
+
+
+ # =============================================================================
+ # Gradio UI (image-only, Gradio 5 compatible)
+ # =============================================================================
+ DESCRIPTION = (
+     "[InternVL 3-14B demo](https://huggingface.co/OpenGVLab/InternVL3-14B) — "
+     "upload an image and ask anything about it."
+ )
+
+ css = """
+ #output_text {
+   height: 500px;
+   overflow: auto;
+   border: 1px solid #ccc;
+ }
+ """
+
+ with gr.Blocks(css=css, theme="origin") as demo:
+     gr.Markdown(DESCRIPTION)
+
+     with gr.Row():
+         # Left column: image, question, submit button (stacked vertically)
+         with gr.Column(scale=1):
+             input_image = gr.Image(label="Upload Image", type="filepath")
+             text_input = gr.Textbox(label="Question")
+             submit_btn = gr.Button("Submit")
+         # Right column: model output
+         with gr.Column(scale=1):
+             output_text = gr.Textbox(label="Model Output", elem_id="output_text")
+
+     submit_btn.click(internvl_inference, [input_image, text_input], [output_text])
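+     # click() feeds the two components' values to internvl_inference as
+     # positional arguments and writes the returned string into output_text.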
+
+ if __name__ == "__main__":
+     demo.launch()