mknolan committed
Commit 9fb6eb1 · verified · 1 Parent(s): 8e8d08d

Upload app.py with huggingface_hub
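
The commit message notes the file was pushed with the huggingface_hub client rather than through git. A minimal sketch of such an upload (illustrative; the repo id and token setup below are placeholders, not taken from this commit):

    from huggingface_hub import HfApi

    api = HfApi()  # assumes an HF token is already configured, e.g. via huggingface-cli login
    api.upload_file(
        path_or_fileobj="app.py",
        path_in_repo="app.py",
        repo_id="<username>/<space-name>",  # placeholder
        repo_type="space",
        commit_message="Upload app.py with huggingface_hub",
    )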

Files changed (1)
  1. app.py +339 -0
app.py ADDED
@@ -0,0 +1,339 @@
+ import os
+ import sys
+ import math
+ import numpy as np
+ import torch
+ import torchvision.transforms as T
+ from torchvision.transforms.functional import InterpolationMode
+ from PIL import Image
+ import gradio as gr
+ from transformers import AutoModel, AutoTokenizer
+
+ # Constants
+ IMAGENET_MEAN = (0.485, 0.456, 0.406)
+ IMAGENET_STD = (0.229, 0.224, 0.225)
+
+ # Configuration
+ MODEL_NAME = "OpenGVLab/InternVL2_5-8B"  # Smaller model for faster loading
+ IMAGE_SIZE = 448
+
+ # Set up environment variables
+ os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128"
+
+ # Utility functions for image processing
+ def build_transform(input_size):
+     MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
+     transform = T.Compose([
+         T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
+         T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
+         T.ToTensor(),
+         T.Normalize(mean=MEAN, std=STD)
+     ])
+     return transform
+
+ def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
+     best_ratio_diff = float('inf')
+     best_ratio = (1, 1)
+     area = width * height
+     for ratio in target_ratios:
+         target_aspect_ratio = ratio[0] / ratio[1]
+         ratio_diff = abs(aspect_ratio - target_aspect_ratio)
+         if ratio_diff < best_ratio_diff:
+             best_ratio_diff = ratio_diff
+             best_ratio = ratio
+         elif ratio_diff == best_ratio_diff:
+             # On a tie, switch to the later (larger) grid only if the original image
+             # covers more than half of that grid's total pixel area
+             if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
+                 best_ratio = ratio
+     return best_ratio
+
+ def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
+     orig_width, orig_height = image.size
+     aspect_ratio = orig_width / orig_height
+
+     # enumerate the candidate tile grids (i columns x j rows) allowed by min_num/max_num
+     target_ratios = set(
+         (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
+         i * j <= max_num and i * j >= min_num)
+     target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])
+
+     # find the closest aspect ratio to the target
+     target_aspect_ratio = find_closest_aspect_ratio(
+         aspect_ratio, target_ratios, orig_width, orig_height, image_size)
+
+     # calculate the target width and height
+     target_width = image_size * target_aspect_ratio[0]
+     target_height = image_size * target_aspect_ratio[1]
+     blocks = target_aspect_ratio[0] * target_aspect_ratio[1]
+
+     # resize the image
+     resized_img = image.resize((target_width, target_height))
+     processed_images = []
+     for i in range(blocks):
+         box = (
+             (i % (target_width // image_size)) * image_size,
+             (i // (target_width // image_size)) * image_size,
+             ((i % (target_width // image_size)) + 1) * image_size,
+             ((i // (target_width // image_size)) + 1) * image_size
+         )
+         # split the image
+         split_img = resized_img.crop(box)
+         processed_images.append(split_img)
+     assert len(processed_images) == blocks
+     if use_thumbnail and len(processed_images) != 1:
+         thumbnail_img = image.resize((image_size, image_size))
+         processed_images.append(thumbnail_img)
+     return processed_images
+
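+ # Illustrative note (not part of the uploaded file): the two helpers above are meant to be
+ # combined into the batched pixel tensor InternVL consumes, along the lines of the reference
+ # load_image() on the model card; analyze_image() below follows the same pattern:
+ #     transform = build_transform(IMAGE_SIZE)
+ #     tiles = dynamic_preprocess(pil_image, image_size=IMAGE_SIZE)
+ #     pixel_values = torch.stack([transform(tile) for tile in tiles])
+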
+ # Function to split model across GPUs
+ def split_model(model_name):
+     device_map = {}
+     world_size = torch.cuda.device_count()
+     if world_size <= 1:
+         return "auto"
+
+     num_layers = {
+         'InternVL2_5-1B': 24,
+         'InternVL2_5-2B': 24,
+         'InternVL2_5-4B': 36,
+         'InternVL2_5-8B': 32,
+         'InternVL2_5-26B': 48,
+         'InternVL2_5-38B': 64,
+         'InternVL2_5-78B': 80
+     }[model_name]
+
+     # Since the first GPU will be used for ViT, treat it as half a GPU.
+     num_layers_per_gpu = math.ceil(num_layers / (world_size - 0.5))
+     num_layers_per_gpu = [num_layers_per_gpu] * world_size
+     num_layers_per_gpu[0] = math.ceil(num_layers_per_gpu[0] * 0.5)
+     layer_cnt = 0
+     for i, num_layer in enumerate(num_layers_per_gpu):
+         for j in range(num_layer):
+             device_map[f'language_model.model.layers.{layer_cnt}'] = i
+             layer_cnt += 1
+     device_map['vision_model'] = 0
+     device_map['mlp1'] = 0
+     device_map['language_model.model.tok_embeddings'] = 0
+     device_map['language_model.model.embed_tokens'] = 0
+     device_map['language_model.model.rotary_emb'] = 0
+     device_map['language_model.output'] = 0
+     device_map['language_model.model.norm'] = 0
+     device_map['language_model.lm_head'] = 0
+     device_map[f'language_model.model.layers.{num_layers - 1}'] = 0
+
+     return device_map
+
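+ # Illustrative note (not part of the uploaded file): for InternVL2_5-8B (32 decoder layers) on a
+ # hypothetical 2-GPU machine, the map above places layers 0-10 on GPU 0 and layers 11-30 on GPU 1,
+ # while pinning the vision tower, embeddings, norm, output head and the final layer 31 to GPU 0,
+ # so the GPU that hosts the ViT carries roughly half a share of decoder layers.
+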
+ # Model loading function
+ def load_model():
+     print(f"\n=== Loading {MODEL_NAME} ===")
+     print(f"CUDA available: {torch.cuda.is_available()}")
+
+     if torch.cuda.is_available():
+         print(f"GPU count: {torch.cuda.device_count()}")
+         for i in range(torch.cuda.device_count()):
+             print(f"GPU {i}: {torch.cuda.get_device_name(i)}")
+
+         # Memory info
+         print(f"Total GPU memory: {torch.cuda.get_device_properties(0).total_memory / 1e9:.2f} GB")
+         print(f"Allocated GPU memory: {torch.cuda.memory_allocated() / 1e9:.2f} GB")
+         print(f"Reserved GPU memory: {torch.cuda.memory_reserved() / 1e9:.2f} GB")
+
+     # Determine device map
+     device_map = "auto"
+     if torch.cuda.is_available() and torch.cuda.device_count() > 1:
+         model_short_name = MODEL_NAME.split('/')[-1]
+         device_map = split_model(model_short_name)
+
+     # Load model and tokenizer
+     try:
+         model = AutoModel.from_pretrained(
+             MODEL_NAME,
+             torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
+             low_cpu_mem_usage=True,
+             trust_remote_code=True,
+             device_map=device_map
+         )
+
+         tokenizer = AutoTokenizer.from_pretrained(
+             MODEL_NAME,
+             use_fast=False,
+             trust_remote_code=True
+         )
+
+         # Fix for image context token ID - needed to make the model work with images
+         print("Setting image context token ID...")
+         if hasattr(tokenizer, 'encode'):
+             # Get special token ID from tokenizer
+             img_context_token_id = tokenizer.encode("<image>", add_special_tokens=False)[0]
+             model.img_context_token_id = img_context_token_id
+             print(f"Set img_context_token_id to {img_context_token_id}")
+
+         print("✓ Model and tokenizer loaded successfully!")
+         return model, tokenizer
+     except Exception as e:
+         print(f"❌ Error loading model: {e}")
+         import traceback
+         traceback.print_exc()
+         return None, None
+
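+ # For reference (illustrative, based on the InternVL2.5 model card, not part of the uploaded
+ # file): the remote-code model exposes a chat() helper that takes the tokenizer, a batched
+ # pixel tensor, a question containing an '<image>' placeholder and a generation-config dict:
+ #     response = model.chat(tokenizer, pixel_values, '<image>\nDescribe this image.',
+ #                           dict(max_new_tokens=512))
+ # analyze_image() below uses this interface.
+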
+ # Image analysis function
+ def analyze_image(model, tokenizer, image, prompt):
+     try:
+         # Check if image is valid
+         if image is None:
+             return "Please upload an image first."
+
+         # Tile the image and convert the tiles into a normalized pixel tensor
+         processed_images = dynamic_preprocess(image, image_size=IMAGE_SIZE)
+         transform = build_transform(IMAGE_SIZE)
+         pixel_values = torch.stack([transform(tile) for tile in processed_images])
+
+         # Match the model's dtype and device
+         if torch.cuda.is_available():
+             pixel_values = pixel_values.to(torch.bfloat16).cuda()
+
+         # Build the question with the <image> placeholder the model expects
+         question = f"<image>\n{prompt}"
+
+         # Generate a response through the model's chat interface
+         generation_config = dict(max_new_tokens=512, do_sample=False)
+         with torch.no_grad():
+             response = model.chat(tokenizer, pixel_values, question, generation_config)
+
+         return response
+     except Exception as e:
+         import traceback
+         error_msg = f"Error analyzing image: {str(e)}\n{traceback.format_exc()}"
+         return error_msg
+
+ # Function to handle two images
+ def analyze_two_images(model, tokenizer, image1, image2, prompt):
+     try:
+         # Check if at least one image is provided
+         if image1 is None and image2 is None:
+             return "Please upload at least one image."
+
+         results = []
+
+         # Process first image if provided
+         if image1 is not None:
+             result1 = analyze_image(model, tokenizer, image1, prompt)
+             results.append(f"# Image 1 Analysis\n\n{result1}")
+         else:
+             results.append("# Image 1\n\nNo image uploaded.")
+
+         # Process second image if provided
+         if image2 is not None:
+             result2 = analyze_image(model, tokenizer, image2, prompt)
+             results.append(f"# Image 2 Analysis\n\n{result2}")
+         else:
+             results.append("# Image 2\n\nNo image uploaded.")
+
+         # Combine results
+         combined_result = f"{results[0]}\n\n---\n\n{results[1]}"
+
+         return combined_result
+     except Exception as e:
+         import traceback
+         error_msg = f"Error analyzing images: {str(e)}\n{traceback.format_exc()}"
+         return error_msg
+
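+ # Illustrative note (not part of the uploaded file): the function above analyzes the two images
+ # in separate chat() calls, so prompts such as "Compare these images" never show the model both
+ # images at once. The model card documents a joint multi-image call along these lines:
+ #     pixel_values = torch.cat((pixel_values1, pixel_values2), dim=0)
+ #     num_patches_list = [pixel_values1.size(0), pixel_values2.size(0)]
+ #     question = 'Image-1: <image>\nImage-2: <image>\n' + prompt
+ #     response = model.chat(tokenizer, pixel_values, question, generation_config,
+ #                           num_patches_list=num_patches_list)
+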
+ # Main function
+ def main():
+     # Load the model
+     model, tokenizer = load_model()
+
+     if model is None:
+         # Create an error interface if model loading failed
+         demo = gr.Interface(
+             fn=lambda x: "Model loading failed. Please check the logs for details.",
+             inputs=gr.Textbox(),
+             outputs=gr.Textbox(),
+             title="InternVL2.5 Dual Image Analyzer - Error",
+             description="The model failed to load. Please check the logs for more information."
+         )
+         return demo
+
+     # Predefined prompts for analysis
+     prompts = [
+         "Describe this image in detail.",
+         "What can you tell me about this image?",
+         "Is there any text in this image? If so, can you read it?",
+         "What is the main subject of this image?",
+         "What emotions or feelings does this image convey?",
+         "Describe the composition and visual elements of this image.",
+         "Summarize what you see in this image in one paragraph.",
+         "Compare these images and describe the differences."
+     ]
+
+     # Create the interface with two images
+     with gr.Blocks(title="InternVL2.5 Dual Image Analyzer") as demo:
+         gr.Markdown("# 🖼️ InternVL2.5 Dual Image Analyzer")
+         gr.Markdown("Upload one or two images and ask the InternVL2.5 model to analyze them.")
+
+         with gr.Row():
+             with gr.Column(scale=1):
+                 image1 = gr.Image(type="pil", label="Upload Image 1")
+                 image2 = gr.Image(type="pil", label="Upload Image 2")
+                 prompt = gr.Dropdown(
+                     choices=prompts,
+                     value=prompts[0],
+                     label="Select a prompt or write your own below",
+                     allow_custom_value=True
+                 )
+                 analyze_button = gr.Button("Analyze Images", variant="primary")
+
+             with gr.Column(scale=1):
+                 output = gr.Markdown(label="Analysis Results")
+
+         # The click handler closes over the model and tokenizer loaded above
+         analyze_button.click(
+             fn=lambda img1, img2, p: analyze_two_images(model, tokenizer, img1, img2, p),
+             inputs=[image1, image2, prompt],
+             outputs=output
+         )
+
+         # Example images
+         if os.path.exists("example_images"):
+             example_files = [f for f in os.listdir("example_images") if f.endswith((".jpg", ".jpeg", ".png"))]
+             if len(example_files) >= 2:
+                 example1 = os.path.join("example_images", example_files[0])
+                 example2 = os.path.join("example_images", example_files[1])
+
+                 examples = [
+                     [example1, None, "Describe this image in detail."],
+                     [None, example2, "Describe this image in detail."],
+                     [example1, example2, "Compare these images and describe the differences."]
+                 ]
+
+                 gr.Examples(
+                     examples=examples,
+                     inputs=[image1, image2, prompt]
+                 )
+
+     return demo
+
+ # Run the application
+ if __name__ == "__main__":
+     try:
+         # Check for GPU
+         if not torch.cuda.is_available():
+             print("WARNING: CUDA is not available. The model requires a GPU to function properly.")
+
+         # Create and launch the interface
+         demo = main()
+         demo.launch(server_name="0.0.0.0")
+     except Exception as e:
+         print(f"Error starting the application: {e}")
+         import traceback
+         traceback.print_exc()