# NOTE(review): removed scraped web-page residue (Spaces status header, file
# size, git object hashes, and a column-number ruler) — it was not Python and
# made the file a syntax error.
import gradio as gr
import requests, json
import os
import io
import IPython.display
from PIL import Image
import base64
import torch
from transformers import pipeline
# Local BLIP image-captioning pipeline (downloads model weights on first run).
# Used by get_completion() below; replaces the commented-out HTTP-endpoint flow.
completion_obj = pipeline("image-to-text",model="Salesforce/blip-image-captioning-base")
#def greet(name):
# return "Hello " + name +os.environ['HF_TOKENS']
#demo = gr.Interface(fn=greet, inputs="text", outputs="text")
#demo.launch()
#gr.close_all()
#gr.Textbox(os.environ['HF_TOKENS'])
#Image-to-text endpoint
def get_completion(inputs):
    """Run the local BLIP image-to-text pipeline on *inputs*.

    Parameters
    ----------
    inputs : PIL.Image.Image or str
        The image to caption — anything the transformers pipeline's image
        loader accepts (PIL image, URL, or file path — TODO confirm which
        forms callers actually pass).

    Returns
    -------
    list[dict]
        Raw pipeline output, e.g. ``[{"generated_text": "..."}]``; callers
        such as ``captioner`` extract ``[0]["generated_text"]``.
    """
    # BUG FIX: the original body was `output = get_completion(input)` —
    # unconditional infinite recursion, and it referenced the *builtin*
    # `input` instead of the `inputs` parameter. It also pre-extracted
    # [0]['generated_text'] even though captioner() indexes the result the
    # same way again. Delegate to the module-level pipeline and return its
    # raw output so the caller's indexing works.
    return completion_obj(inputs)
# headers = {
# "Authorization": f"Bearer {os.environ['HF_TOKENS']}",
# "Content-Type": "application/json"
# }
# data = { "inputs": inputs }
# if parameters is not None:
# data.update({"parameters": parameters})
# response = requests.request("POST",
# ENDPOINT_URL,
# headers=headers,
# data=json.dumps(data))
# return json.loads(response.content.decode("utf-8"))
#demo = gr.Interface(
# fn=get_completion,
# inputs="text",
# outputs="text"
#)
#image_url = "https://free-images.com/sm/9596/dog_animal_greyhound_983023.jpg"
#demo = gr.get_completion(image_url)
def image_to_base64_str(pil_image):
    """Return *pil_image* encoded as a base64 string of its PNG bytes."""
    with io.BytesIO() as buffer:
        pil_image.save(buffer, format='PNG')
        png_bytes = buffer.getvalue()
    # b64encode yields bytes; decode to a plain utf-8 str for the caller.
    return base64.b64encode(png_bytes).decode('utf-8')
def captioner(image):
    """Generate a caption for an uploaded image.

    Parameters
    ----------
    image : PIL.Image.Image
        Image from the ``gr.Image`` component (``type="pil"``).

    Returns
    -------
    str
        The caption produced by the BLIP pipeline.
    """
    # FIX: the base64 round-trip was only needed for the commented-out HTTP
    # endpoint version above; the local transformers pipeline accepts PIL
    # images directly, so pass the image straight through.
    result = get_completion(image)
    return result[0]['generated_text']
# Shut down any previously-launched Gradio apps before creating a new one.
gr.close_all()

# Build the captioning UI: one image upload in, one text caption out.
_image_input = gr.Image(label="Upload image", type="pil")
_caption_output = gr.Textbox(label="Caption")
demo = gr.Interface(
    fn=captioner,
    inputs=[_image_input],
    outputs=[_caption_output],
    title="Image Captioning with BLIP",
    description="Caption any image using the BLIP model",
    allow_flagging="never",
)
#demo = gr.Interface(fn=captioner,
# inputs=[gr.Image(label="Upload image", type="pil")],
#// outputs=[gr.Textbox(label="Caption")],
# // title="Image Captioning with BLIP",
# // description="Caption any image using the BLIP model",
# // allow_flagging="never",
# // examples=["christmas_dog.jpeg", "bird_flight.jpeg", "cow.jpeg"])
# FIX: stripped a stray trailing " |" (web-scrape table residue) that made
# this line a syntax error.
demo.launch()