import gradio as gr
import requests, json  # used only by the commented-out Inference Endpoint variant below
import os
import torch
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration
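# Load the BLIP captioning model and its processor once at startup, so each
# request only pays for generation, not for downloading/initializing weights.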
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
# Earlier scratch demo, kept commented out:
# def greet(name):
#     return "Hello " + name + os.environ['HF_TOKENS']
# demo = gr.Interface(fn=greet, inputs="text", outputs="text")
# demo.launch()
# gr.close_all()
# gr.Textbox(os.environ['HF_TOKENS'])
# Image-to-text: caption a PIL image locally with BLIP
def get_completion(raw_image):
    # raw_image = Image.open(image).convert('RGB')  # only needed if a file path is passed instead of a PIL image
    text = "a photography of"  # conditioning prefix that BLIP completes into a caption
    inputs = processor(raw_image, text, return_tensors="pt")
    out = model.generate(**inputs)
    return processor.decode(out[0], skip_special_tokens=True)
# Former get_completion body that called a hosted Inference Endpoint instead:
#     headers = {
#         "Authorization": f"Bearer {os.environ['HF_TOKENS']}",
#         "Content-Type": "application/json"
#     }
#     data = {"inputs": inputs}
#     if parameters is not None:
#         data.update({"parameters": parameters})
#     response = requests.request("POST",
#                                 ENDPOINT_URL,
#                                 headers=headers,
#                                 data=json.dumps(data))
#     return json.loads(response.content.decode("utf-8"))
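# Quick local smoke test, kept commented out so it never runs on the Space
# ("example.jpg" is a hypothetical file name; any RGB image works):
# print(get_completion(Image.open("example.jpg").convert("RGB")))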
gr.close_all()  # close any Gradio servers left running from earlier launches
demo = gr.Interface(fn=get_completion,
                    inputs=[gr.Image(label="Upload image", type="pil")],
                    outputs=[gr.Textbox(label="Caption")],
                    title="Image Captioning with BLIP",
                    description="Caption any image using the BLIP model",
                    allow_flagging="never")
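# Note: allow_flagging="never" hides Gradio's Flag button; in Gradio 5.x the
# parameter was renamed to flagging_mode, so adjust this if you upgrade.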
# Variant with example images (note: captioner is not defined in this file):
# demo = gr.Interface(fn=captioner,
#                     inputs=[gr.Image(label="Upload image", type="pil")],
#                     outputs=[gr.Textbox(label="Caption")],
#                     title="Image Captioning with BLIP",
#                     description="Caption any image using the BLIP model",
#                     allow_flagging="never",
#                     examples=["christmas_dog.jpeg", "bird_flight.jpeg", "cow.jpeg"])
demo.launch()
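# demo.launch(share=True)  # alternative: also create a temporary public gradio.live link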