from transformers_js import import_transformers_js, as_url
import gradio as gr
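
# NOTE: this snippet assumes a Pyodide-based runtime such as Gradio-Lite,
# where top-level `await` is available and the `transformers_js` module
# bridges Python to the Transformers.js library running in the browser.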
transformers = await import_transformers_js()
pipeline = transformers.pipeline
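
# Build a depth-estimation pipeline; Transformers.js downloads the ONNX
# weights for the model from the Hugging Face Hub (and caches them in the
# browser) on first use.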
depth_estimator = await pipeline('depth-estimation', 'Xenova/depth-anything-large-hf')


async def estimate(input_image):
    # gr.Image(type="filepath") hands the function a local file path; as_url()
    # converts it into a URL that the in-browser pipeline can read.
    output = await depth_estimator(as_url(input_image))
    return output
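
# The raw pipeline result is shown with a JSON output component; in
# Transformers.js, a depth-estimation pipeline returns a `depth` image
# together with a `predicted_depth` tensor.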
demo = gr.Interface(
    fn=estimate,
    inputs=[
        gr.Image(type="filepath"),
    ],
    outputs=[
        gr.JSON(),
    ],
    # The example assumes bread_small.png is available alongside the app.
    examples=[
        ["bread_small.png"],
    ],
)

demo.launch()

# Requirements for the Pyodide / Gradio-Lite environment (e.g. listed in a
# <gradio-requirements> block or requirements file):
#     transformers_js_py