"""Arabic image-captioning demo: BLIP caption -> English-to-Arabic translation.

Pipeline: image -> English caption (BLIP large) -> Arabic translation
(Helsinki-NLP opus-mt-en-ar), returned as an HTML snippet for a Gradio UI.

NOTE(review): the original source was collapsed onto one physical line and
was truncated mid-statement (at ``html_result = f'``). The tail of
``Arabic_Image_Captioning`` below is a minimal reconstruction and must be
confirmed against the original file.
"""

import torch
from transformers import pipeline
from PIL import Image
import gradio as gr
import os

# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Image -> English caption.
caption_image = pipeline(
    "image-to-text",
    model="Salesforce/blip-image-captioning-large",
    device=device,
)

# English -> Arabic translation. Placed on `device` for consistency with the
# captioning pipeline (the original omitted `device` here, leaving this model
# on the default CPU even when a GPU is available).
translate = pipeline(
    "translation_en_to_ar",
    model="Helsinki-NLP/opus-mt-en-ar",
    device=device,
)

# Local example images shown in the Gradio UI.
example_images = ["flower.jpg"]


def Arabic_Image_Captioning(image):
    """Return an HTML snippet containing the Arabic caption of *image*.

    Parameters
    ----------
    image : PIL.Image.Image
        Input image (Gradio passes a PIL image to the callback).

    Returns
    -------
    str
        HTML containing the Arabic caption.
    """
    # Both pipelines return a list of dicts; take the first result.
    caption = caption_image(image)
    caption = caption[0]['generated_text']
    arabic_caption = translate(caption)
    arabic_caption = arabic_caption[0]['translation_text']
    # NOTE(review): the original f-string was truncated in the source; this
    # right-to-left wrapper (Arabic is an RTL script) is a plausible
    # reconstruction -- confirm against the original file.
    html_result = f'<div dir="rtl">{arabic_caption}</div>'
    return html_result