Update app.py
app.py CHANGED
@@ -2,26 +2,25 @@ import gradio as gr
 from transformers import pipeline
 
 # Load the image-to-text pipeline
-
+image_to_text_pipelines = {
+    "Salesforce/blip-image-captioning-base": pipeline("image-to-text", model="Salesforce/blip-image-captioning-base"),
+    # Add more models if needed
+}
 
-def generate_caption(input_image):
-    # Generate caption for the input image
+def generate_caption(input_image, model_name="Salesforce/blip-image-captioning-base"):
+    # Generate caption for the input image using the selected model
+    image_to_text_pipeline = image_to_text_pipelines[model_name]
     caption = image_to_text_pipeline(input_image)[0]['generated_text']
     return caption
 
-# Additional configuration for the output component
-output_textbox = gr.outputs.Textbox(label="Generated Caption", type="auto", lines=5)
-
 # Interface for launching the model
 interface = gr.Interface(
     fn=generate_caption,
-    inputs=gr.
-    outputs=
+    inputs=gr.Image(type='pil', label="Input Image"),
+    outputs="text",
     title="Image Captioning Model",
     description="This model generates captions for images.",
     theme="default",
-    layout="vertical",
-    allow_screenshot=True
 )
 
 # Launch the interface
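For reference, this is what the complete app.py should look like after the commit. It is assembled directly from the hunk above; the only assumption is the trailing interface.launch() call, since the diff ends at the "# Launch the interface" comment.

import gradio as gr
from transformers import pipeline

# Load the image-to-text pipeline
image_to_text_pipelines = {
    "Salesforce/blip-image-captioning-base": pipeline("image-to-text", model="Salesforce/blip-image-captioning-base"),
    # Add more models if needed
}

def generate_caption(input_image, model_name="Salesforce/blip-image-captioning-base"):
    # Generate caption for the input image using the selected model
    image_to_text_pipeline = image_to_text_pipelines[model_name]
    caption = image_to_text_pipeline(input_image)[0]['generated_text']
    return caption

# Interface for launching the model
interface = gr.Interface(
    fn=generate_caption,
    inputs=gr.Image(type='pil', label="Input Image"),
    outputs="text",
    title="Image Captioning Model",
    description="This model generates captions for images.",
    theme="default",
)

# Launch the interface (assumed final line; the diff cuts off at the comment above)
interface.launch()

A quick way to exercise generate_caption outside Gradio (the image path is a placeholder):

from PIL import Image
print(generate_caption(Image.open("example.jpg")))

As committed, the Interface only passes the image, so model_name always falls back to its default and the pipelines dictionary effectively holds a single entry. If more models were added to the dictionary, one way to expose the choice would be a second input such as a gr.Dropdown. The sketch below is a hypothetical extension, not part of this commit, and reuses generate_caption and image_to_text_pipelines from the file above.

import gradio as gr

# Hypothetical extension: let the user pick the captioning model.
# Assumes image_to_text_pipelines and generate_caption from app.py above.
interface = gr.Interface(
    fn=generate_caption,
    inputs=[
        gr.Image(type='pil', label="Input Image"),
        gr.Dropdown(
            choices=list(image_to_text_pipelines.keys()),
            value="Salesforce/blip-image-captioning-base",
            label="Model",
        ),
    ],
    outputs="text",
    title="Image Captioning Model",
    description="This model generates captions for images.",
)

interface.launch()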