from transformers import VisionEncoderDecoderModel, ViTFeatureExtractor, AutoTokenizer
import torch
from PIL import Image

# Load model and tokenizer from the Hugging Face repository
model_name = "aryan083/vit-gpt2-image-captioning"
model = VisionEncoderDecoderModel.from_pretrained(model_name)
feature_extractor = ViTFeatureExtractor.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Run on GPU when available, otherwise fall back to CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)

# Generation settings: beam search with a short maximum caption length
max_length = 16
num_beams = 4
gen_kwargs = {'max_length': max_length, 'num_beams': num_beams}

def predict_step(image_path):
    # Preprocess the image into the pixel values the ViT encoder expects
    image = Image.open(image_path)
    if image.mode != 'RGB':
        image = image.convert('RGB')  # the feature extractor expects 3-channel input
    pixel_values = feature_extractor(images=image, return_tensors='pt').pixel_values
    pixel_values = pixel_values.to(device)

    # Generate caption token ids, then decode them back to text
    output_ids = model.generate(pixel_values, **gen_kwargs)
    preds = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
    preds = [pred.strip() for pred in preds]
    return preds[0]

# Example usage with your image file
image_path = 'jon-parry-C8eSYwQkwHw-unsplash.jpg'
print(predict_step(image_path=image_path))
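
# Beyond single images, the same model can caption several images in one
# generate() call: the feature extractor accepts a list of PIL images and
# stacks them into a single pixel_values tensor. A minimal sketch, reusing
# the model, feature_extractor, tokenizer, device, and gen_kwargs defined
# above; the caption_batch name and the example file names are hypothetical.

def caption_batch(image_paths):
    # Load every image and normalize to RGB before stacking into one batch
    images = [Image.open(p).convert('RGB') for p in image_paths]
    pixel_values = feature_extractor(images=images, return_tensors='pt').pixel_values
    pixel_values = pixel_values.to(device)

    # One generate() call produces one caption per image in the batch
    output_ids = model.generate(pixel_values, **gen_kwargs)
    preds = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
    return [pred.strip() for pred in preds]

# Example: captions = caption_batch(['photo1.jpg', 'photo2.jpg'])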