Update app.py
app.py CHANGED
@@ -1,81 +1,46 @@
 import torch
-import torchvision.transforms as T
 from PIL import Image
-from transformers import AutoModel,
 import gradio as gr
-import logging
-
-#
-logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
-
-# Device Configuration
-device = torch.device("cpu")  # Force CPU usage
-
-# ImageNet normalization values
-IMAGENET_MEAN = (0.485, 0.456, 0.406)
-IMAGENET_STD = (0.229, 0.224, 0.225)
-
-def build_transform(input_size):
-    """Build preprocessing pipeline for images."""
-    transform = T.Compose([
-        T.Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
-        T.Resize((input_size, input_size), interpolation=T.InterpolationMode.BICUBIC),
-        T.ToTensor(),
-        T.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
-    ])
-    return transform
-
-def preprocess_image(image, input_size=448):
-    """Preprocess the image to the required format."""
-    transform = build_transform(input_size)
-    tensor_image = transform(image).unsqueeze(0).to(torch.float32)  # Use float32 for CPU
-    return tensor_image
-
-# Load the model and tokenizer
-logging.info("Loading model from Hugging Face Hub...")
-model_path = "OpenGVLab/InternVL2_5-1B"
 model = AutoModel.from_pretrained(
-
-
-
 
-
 
-#
-
-tokenizer.add_tokens(["<image>"])
-model.resize_token_embeddings(len(tokenizer))  # Resize model embeddings
-
-assert "<image>" in tokenizer.get_vocab(), "Error: `<image>` token is missing from tokenizer vocabulary."
-
-def describe_image(image):
-    """Generate a description for the uploaded image."""
     try:
-
-
-
-        #
-
-
-
-
-
-
-
-
-        return response
     except Exception as e:
-
-
-
-
-
-
-
-
-
-    description="Upload an image to extract text using the pretrained model.",
 )
 
 if __name__ == "__main__":
-
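Note on the rewrite: the removed version hand-rolled its ImageNet preprocessing (448x448 bicubic resize plus mean/std normalization), which the new version below delegates to CLIPImageProcessor. A minimal sketch comparing the two, not part of the commit, assuming a hypothetical local example.jpg and that the checkpoint's preprocessor config uses the same 448px ImageNet settings:

import torch
import torchvision.transforms as T
from PIL import Image
from transformers import CLIPImageProcessor

# Handwritten pipeline from the removed code: direct 448x448 bicubic resize + ImageNet stats.
transform = T.Compose([
    T.Resize((448, 448), interpolation=T.InterpolationMode.BICUBIC),
    T.ToTensor(),
    T.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
])

# Config-driven preprocessing used by the new code.
processor = CLIPImageProcessor.from_pretrained('OpenGVLab/InternViT-6B-448px-V1-5')

img = Image.open('example.jpg').convert('RGB')  # hypothetical test image
a = transform(img).unsqueeze(0)
b = processor(images=img, return_tensors='pt').pixel_values
# Values can differ on non-square images if the processor resizes the short
# side and center-crops rather than resizing directly to 448x448.
print(a.shape, b.shape, (a - b).abs().max().item())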
 import torch
 from PIL import Image
+from transformers import AutoModel, CLIPImageProcessor
 import gradio as gr
 
+# Load the model
 model = AutoModel.from_pretrained(
+    'OpenGVLab/InternViT-6B-448px-V1-5',
+    torch_dtype=torch.bfloat16,
+    low_cpu_mem_usage=True,
+    trust_remote_code=True
+).cuda().eval()
 
+# Load the image processor
+image_processor = CLIPImageProcessor.from_pretrained('OpenGVLab/InternViT-6B-448px-V1-5')
 
+# Define the function to process the image and generate outputs
+def process_image(image):
     try:
+        # Convert uploaded image to RGB
+        image = image.convert('RGB')
+
+        # Preprocess the image
+        pixel_values = image_processor(images=image, return_tensors='pt').pixel_values
+        pixel_values = pixel_values.to(torch.bfloat16).cuda()
+
+        # Run the model
+        outputs = model(pixel_values)
+
+        # Assuming the model returns embeddings or features
+        return f"Output Shape: {outputs.last_hidden_state.shape}"
     except Exception as e:
+        return f"Error: {str(e)}"
+
+# Create the Gradio interface
+demo = gr.Interface(
+    fn=process_image,  # Function to process the input
+    inputs=gr.Image(type="pil"),  # Accepts images as input
+    outputs=gr.Textbox(label="Model Output"),  # Displays model output
+    title="InternViT Demo",
+    description="Upload an image to process it using the InternViT model from OpenGVLab."
 )
 
+# Launch the demo
 if __name__ == "__main__":
+    demo.launch(server_name="0.0.0.0", server_port=7860)
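As committed, the new app assumes a GPU: .cuda() raises if CUDA is unavailable, whereas the previous version forced CPU. A device-agnostic sketch of the same flow, untested and not part of this commit (the device and dtype selection are additions, not the author's code):

import torch
from transformers import AutoModel, CLIPImageProcessor

# Fall back to CPU when no GPU is present; float32 is the safer CPU default.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
dtype = torch.bfloat16 if device.type == 'cuda' else torch.float32

model = AutoModel.from_pretrained(
    'OpenGVLab/InternViT-6B-448px-V1-5',
    torch_dtype=dtype,
    low_cpu_mem_usage=True,
    trust_remote_code=True,
).to(device).eval()

image_processor = CLIPImageProcessor.from_pretrained('OpenGVLab/InternViT-6B-448px-V1-5')

def process_image(image):
    pixel_values = image_processor(images=image.convert('RGB'), return_tensors='pt').pixel_values
    pixel_values = pixel_values.to(dtype=dtype, device=device)
    with torch.no_grad():  # inference only, skip autograd bookkeeping
        outputs = model(pixel_values)
    return f"Output Shape: {tuple(outputs.last_hidden_state.shape)}"

Bear in mind that a 6B-parameter encoder in float32 needs on the order of 24 GB of memory for weights alone, so the CPU fallback is only practical on large instances.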