mknolan committed on
Commit b9314fe · verified · 1 Parent(s): 82ce431

Upload app.py with huggingface_hub

Files changed (1)
app.py +334 -0
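The commit message says the file was pushed with the huggingface_hub client. As a minimal sketch of how such an upload is typically done (the repo id below is a placeholder, not taken from this commit):

from huggingface_hub import HfApi

api = HfApi()  # uses the token from `huggingface-cli login` or the HF_TOKEN env var
api.upload_file(
    path_or_fileobj="app.py",        # local file to push
    path_in_repo="app.py",           # destination path inside the repo
    repo_id="<user>/<space-name>",   # hypothetical Space id; not shown in this commit
    repo_type="space",
    commit_message="Upload app.py with huggingface_hub",
)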
app.py ADDED
@@ -0,0 +1,334 @@
+ import os
+ import sys
+ import math
+ import numpy as np
+ import torch
+ import torchvision.transforms as T
+ from torchvision.transforms.functional import InterpolationMode
+ from PIL import Image
+ import gradio as gr
+ from transformers import AutoModel, AutoTokenizer
+
+
+ # Enhanced debug printing
+ import logging
+ import traceback
+
+ # Configure logging
+ logging.basicConfig(
+     level=logging.INFO,
+     format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
+     handlers=[logging.StreamHandler()]
+ )
+ logger = logging.getLogger("InternVL2.5-Debug")
+
+ # Print environment info
+ logger.info("Python version: %s", sys.version)
+ logger.info("PyTorch version: %s", torch.__version__)
+ logger.info("Transformers version: %s", __import__("transformers").__version__)
+ try:
+     logger.info("Einops version: %s", __import__("einops").__version__)
+ except ImportError:
+     logger.error("Einops is not installed!")
+
+ # Constants
+ IMAGENET_MEAN = (0.485, 0.456, 0.406)
+ IMAGENET_STD = (0.229, 0.224, 0.225)
+
+ # Configuration
+ MODEL_NAME = "OpenGVLab/InternVL2_5-8B"  # Smaller model for faster loading
+ IMAGE_SIZE = 448
+
+ # Set up environment variables
+ os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128"
+
+ # Utility functions for image processing
+ def build_transform(input_size):
+     MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
+     transform = T.Compose([
+         T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
+         T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
+         T.ToTensor(),
+         T.Normalize(mean=MEAN, std=STD)
+     ])
+     return transform
+
+ def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
+     best_ratio_diff = float('inf')
+     best_ratio = (1, 1)
+     area = width * height
+     for ratio in target_ratios:
+         target_aspect_ratio = ratio[0] / ratio[1]
+         ratio_diff = abs(aspect_ratio - target_aspect_ratio)
+         if ratio_diff < best_ratio_diff:
+             best_ratio_diff = ratio_diff
+             best_ratio = ratio
+         elif ratio_diff == best_ratio_diff:
+             if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
+                 best_ratio = ratio
+     return best_ratio
+
+ def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
+     orig_width, orig_height = image.size
+     aspect_ratio = orig_width / orig_height
+
+     # generate the candidate tiling grids (i x j tiles) within the block budget
+     target_ratios = set(
+         (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
+         i * j <= max_num and i * j >= min_num)
+     target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])
+
+     # find the closest aspect ratio to the target
+     target_aspect_ratio = find_closest_aspect_ratio(
+         aspect_ratio, target_ratios, orig_width, orig_height, image_size)
+
+     # calculate the target width and height
+     target_width = image_size * target_aspect_ratio[0]
+     target_height = image_size * target_aspect_ratio[1]
+     blocks = target_aspect_ratio[0] * target_aspect_ratio[1]
+
+     # resize the image
+     resized_img = image.resize((target_width, target_height))
+     processed_images = []
+     for i in range(blocks):
+         box = (
+             (i % (target_width // image_size)) * image_size,
+             (i // (target_width // image_size)) * image_size,
+             ((i % (target_width // image_size)) + 1) * image_size,
+             ((i // (target_width // image_size)) + 1) * image_size
+         )
+         # split the image
+         split_img = resized_img.crop(box)
+         processed_images.append(split_img)
+     assert len(processed_images) == blocks
+     if use_thumbnail and len(processed_images) != 1:
+         thumbnail_img = image.resize((image_size, image_size))
+         processed_images.append(thumbnail_img)
+     return processed_images
+
+ # Load and preprocess image for the model - following the official documentation pattern
+ def load_image(image_pil, max_num=12):
+     # Process the image using dynamic_preprocess
+     processed_images = dynamic_preprocess(image_pil, image_size=IMAGE_SIZE, max_num=max_num)
+
+     # Convert PIL images to tensor format expected by the model
+     transform = build_transform(IMAGE_SIZE)
+     pixel_values = [transform(img) for img in processed_images]
+     pixel_values = torch.stack(pixel_values)
+
+     # Convert to appropriate data type
+     if torch.cuda.is_available():
+         pixel_values = pixel_values.cuda().to(torch.bfloat16)
+     else:
+         pixel_values = pixel_values.to(torch.float32)
+
+     return pixel_values
+
+ # Function to split model across GPUs
+ def split_model(model_name):
+     device_map = {}
+     world_size = torch.cuda.device_count()
+     if world_size <= 1:
+         return "auto"
+
+     num_layers = {
+         'InternVL2_5-1B': 24,
+         'InternVL2_5-2B': 24,
+         'InternVL2_5-4B': 36,
+         'InternVL2_5-8B': 32,
+         'InternVL2_5-26B': 48,
+         'InternVL2_5-38B': 64,
+         'InternVL2_5-78B': 80
+     }[model_name]
+
+     # Since the first GPU will be used for ViT, treat it as half a GPU.
+     num_layers_per_gpu = math.ceil(num_layers / (world_size - 0.5))
+     num_layers_per_gpu = [num_layers_per_gpu] * world_size
+     num_layers_per_gpu[0] = math.ceil(num_layers_per_gpu[0] * 0.5)
+     layer_cnt = 0
+     for i, num_layer in enumerate(num_layers_per_gpu):
+         for j in range(num_layer):
+             device_map[f'language_model.model.layers.{layer_cnt}'] = i
+             layer_cnt += 1
+     device_map['vision_model'] = 0
+     device_map['mlp1'] = 0
+     device_map['language_model.model.tok_embeddings'] = 0
+     device_map['language_model.model.embed_tokens'] = 0
+     device_map['language_model.model.rotary_emb'] = 0
+     device_map['language_model.output'] = 0
+     device_map['language_model.model.norm'] = 0
+     device_map['language_model.lm_head'] = 0
+     device_map[f'language_model.model.layers.{num_layers - 1}'] = 0
+
+     return device_map
+
+ # Get model dtype
+ def get_model_dtype():
+     return torch.bfloat16 if torch.cuda.is_available() else torch.float32
+
+ # Model loading function
+ def load_model():
+     print(f"\n=== Loading {MODEL_NAME} ===")
+     print(f"CUDA available: {torch.cuda.is_available()}")
+
+     model_dtype = get_model_dtype()
+     print(f"Using model dtype: {model_dtype}")
+
+     if torch.cuda.is_available():
+         print(f"GPU count: {torch.cuda.device_count()}")
+         for i in range(torch.cuda.device_count()):
+             print(f"GPU {i}: {torch.cuda.get_device_name(i)}")
+
+         # Memory info
+         print(f"Total GPU memory: {torch.cuda.get_device_properties(0).total_memory / 1e9:.2f} GB")
+         print(f"Allocated GPU memory: {torch.cuda.memory_allocated() / 1e9:.2f} GB")
+         print(f"Reserved GPU memory: {torch.cuda.memory_reserved() / 1e9:.2f} GB")
+
+     # Determine device map
+     device_map = "auto"
+     if torch.cuda.is_available() and torch.cuda.device_count() > 1:
+         model_short_name = MODEL_NAME.split('/')[-1]
+         device_map = split_model(model_short_name)
+
+     # Load model and tokenizer
+     try:
+         model = AutoModel.from_pretrained(
+             MODEL_NAME,
+             torch_dtype=model_dtype,
+             low_cpu_mem_usage=True,
+             trust_remote_code=True,
+             device_map=device_map
+         )
+
+         tokenizer = AutoTokenizer.from_pretrained(
+             MODEL_NAME,
+             use_fast=False,
+             trust_remote_code=True
+         )
+
+         print("✓ Model and tokenizer loaded successfully!")
+         return model, tokenizer
+     except Exception as e:
+         logger.error(f"❌ Error loading model: {e}")
+         logger.error("Detailed traceback:")
+         traceback.print_exc()
+
+         # Check if einops is available
+         try:
+             import einops
+             logger.info(f"einops is available, version: {einops.__version__}")
+         except ImportError:
+             logger.error("ImportError: einops is not installed! This is required for InternVL2.5.")
+
+         # Check for CUDA availability
+         if torch.cuda.is_available():
+             logger.info(f"CUDA is available. Device count: {torch.cuda.device_count()}")
+             for i in range(torch.cuda.device_count()):
+                 logger.info(f"Device {i}: {torch.cuda.get_device_name(i)}")
+                 logger.info(f"Memory allocated: {torch.cuda.memory_allocated(i) / 1e9:.2f} GB")
+                 logger.info(f"Memory reserved: {torch.cuda.memory_reserved(i) / 1e9:.2f} GB")
+         else:
+             logger.warning("CUDA is not available. Running on CPU.")
+
+         # Signal failure to the caller so it can show an error interface
+         return None, None
+
+ # Image analysis function using the chat method from the documentation
+ def analyze_image(model, tokenizer, image, prompt):
+     try:
+         # Check if image is valid
+         if image is None:
+             return "Please upload an image first."
+
+         # Process the image following the official pattern
+         pixel_values = load_image(image)
+
+         # Debug info
+         print(f"Image processed: tensor shape {pixel_values.shape}, dtype {pixel_values.dtype}")
+
+         # Define generation config
+         generation_config = {
+             "max_new_tokens": 512,
+             "do_sample": False
+         }
+
+         # Use the model.chat method as shown in the official documentation
+         question = f"<image>\n{prompt}"
+         response, _ = model.chat(
+             tokenizer=tokenizer,
+             pixel_values=pixel_values,
+             question=question,
+             generation_config=generation_config,
+             history=None,
+             return_history=True
+         )
+
+         return response
+     except Exception as e:
+         error_msg = f"Error analyzing image: {str(e)}\n{traceback.format_exc()}"
+         return error_msg
+
+ # Main function
+ def main():
+     # Add debug info at the start of main
+     logger.info("Starting main() function...")
+     logger.info(f"MODEL_NAME: {MODEL_NAME}")
+
+     # Load the model
+     model, tokenizer = load_model()
+
+     if model is None:
+         # Create an error interface if model loading failed
+         demo = gr.Interface(
+             fn=lambda x: "Model loading failed. Please check the logs for details.",
+             inputs=gr.Textbox(),
+             outputs=gr.Textbox(),
+             title="InternVL2.5 Image Analyzer - Error",
+             description="The model failed to load. Please check the logs for more information."
+         )
+         return demo
+
+     # Predefined prompts for analysis
+     prompts = [
+         "Describe this image in detail.",
+         "What can you tell me about this image?",
+         "Is there any text in this image? If so, can you read it?",
+         "What is the main subject of this image?",
+         "What emotions or feelings does this image convey?",
+         "Describe the composition and visual elements of this image.",
+         "Summarize what you see in this image in one paragraph."
+     ]
+
+     # Create the interface
+     demo = gr.Interface(
+         fn=lambda img, prompt: analyze_image(model, tokenizer, img, prompt),
+         inputs=[
+             gr.Image(type="pil", label="Upload Image"),
+             gr.Dropdown(choices=prompts, value=prompts[0], label="Select a prompt or write your own below",
+                         allow_custom_value=True)
+         ],
+         outputs=gr.Textbox(label="Analysis Results", lines=15),
+         title="InternVL2.5 Image Analyzer",
+         description="Upload an image and ask the InternVL2.5 model to analyze it.",
+         examples=[
+             ["example_images/example1.jpg", "Describe this image in detail."],
+             ["example_images/example2.jpg", "What can you tell me about this image?"]
+         ],
+         theme=gr.themes.Soft(),
+         allow_flagging="never"
+     )
+
+     return demo
+
+ # Run the application
+ if __name__ == "__main__":
+     try:
+         # Check for GPU
+         if not torch.cuda.is_available():
+             print("WARNING: CUDA is not available. The model requires a GPU to function properly.")
+
+         # Create and launch the interface
+         demo = main()
+         demo.launch(server_name="0.0.0.0")
+     except Exception as e:
+         print(f"Error starting the application: {e}")
+         traceback.print_exc()
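For reference, a minimal local smoke test of the tiling helpers defined above, written as a sketch: it assumes app.py is importable from the working directory and that its dependencies (torch, torchvision, transformers, gradio, Pillow) are installed; no GPU or model download is needed, since the model is only loaded inside main().

from PIL import Image
from app import dynamic_preprocess, load_image, IMAGE_SIZE

# Synthetic wide image to exercise the aspect-ratio tiling
img = Image.new("RGB", (1600, 900), color="gray")

tiles = dynamic_preprocess(img, image_size=IMAGE_SIZE, max_num=12, use_thumbnail=True)
print(len(tiles), [t.size for t in tiles])     # a few 448x448 tiles, plus the thumbnail

pixel_values = load_image(img)                 # stacks the transformed tiles into one tensor
print(pixel_values.shape, pixel_values.dtype)  # (num_tiles, 3, 448, 448), float32 on CPU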