import os
import time
import gradio as gr
from typing import *
from pillow_heif import register_heif_opener
register_heif_opener()
import vision_agent as va
from vision_agent.tools import register_tool
from vision_agent.tools import load_image, owl_v2, overlay_bounding_boxes, save_image

from huggingface_hub import login
import spaces

# Perform login using the token
hf_token = os.getenv("HF_TOKEN")
login(token=hf_token, add_to_git_credential=True)

import numpy as np
from PIL import Image

def detect_brain_tumor(image, seg_input, debug: bool = False):
    """
    Detects a brain tumor in the given image and returns the annotated image.

    Parameters:
        image: The input image (as numpy array provided by Gradio).
        seg_input: The segmentation input (not used in this function, but required for Gradio).
        debug (bool): Flag to enable logging for debugging purposes.

    Returns:
        tuple: (numpy array of the annotated image, list of ((x1, y1, x2, y2), label) tuples as expected by gr.AnnotatedImage)
    """
    if debug:
        print("Image received")

    # Step 1: Detect brain tumor using owl_v2
    prompt = "detect brain tumor"
    detections = owl_v2(prompt, image)
    if debug:
        print(f"Detections: {detections}")

    # Step 2: Overlay bounding boxes on the image
    image_with_bboxes = overlay_bounding_boxes(image, detections)
    if debug:
        print("Bounding boxes overlaid on the image")

    # Prepare annotations for the AnnotatedImage output.
    # gr.AnnotatedImage expects (bounding_box, label) tuples with pixel
    # coordinates; vision_agent tools typically return boxes normalized to
    # [0, 1], so scale them up when they look normalized.
    height, width = image.shape[:2]
    annotations = []
    for detection in detections:
        label = detection['label']
        score = detection['score']
        x1, y1, x2, y2 = detection['bbox']
        if max(x1, y1, x2, y2) <= 1.0:
            x1, y1, x2, y2 = x1 * width, y1 * height, x2 * width, y2 * height
        annotations.append(((int(x1), int(y1), int(x2), int(y2)), f"{label} {score:.2f}"))

    # Convert image to numpy array if it's not already
    if isinstance(image_with_bboxes, Image.Image):
        image_with_bboxes = np.array(image_with_bboxes)

    return (image_with_bboxes, annotations)
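
# A minimal local smoke test (not called on import): a sketch for exercising
# detect_brain_tumor outside Gradio, assuming one of the bundled example
# images listed in the UI below is present on disk.
def _local_smoke_test():
    img = np.array(Image.open(
        "./examples/194_jpg.rf.3e3dd592d034bb5ee27a978553819f42.jpg"
    ).convert("RGB"))
    annotated, annotations = detect_brain_tumor(img, "detect brain tumor", debug=True)
    Image.fromarray(annotated).save("annotated_example.png")
    print(annotations)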

INTRO_TEXT="# 🔬🧠 CellVision AI -- Intelligent Cell Imaging Analysis 🤖🧫"
IMAGE_PROMPT="Are these cells healthy or cancerous?"

with gr.Blocks(css="style.css") as demo:
    gr.Markdown(INTRO_TEXT)
    with gr.Tab("Segment/Detect"):
        with gr.Row():
            with gr.Column():
                image = gr.Image(type="numpy")
                seg_input = gr.Text(label="Entities to Segment/Detect", value="detect brain tumor")
        
            with gr.Column():
                annotated_image = gr.AnnotatedImage(label="Output")

        seg_btn = gr.Button("Submit")    
        examples = [["./examples/194_jpg.rf.3e3dd592d034bb5ee27a978553819f42.jpg", "detect brain tumor"],
                    ["./examples/239_jpg.rf.3dcc0799277fb78a2ab21db7761ccaeb.jpg", "detect brain tumor"],
                    ["./examples/1385_jpg.rf.3c67cb92e2922dba0e6dba86f69df40b.jpg", "detect brain tumor"],
                    ["./examples/1491_jpg.rf.3c658e83538de0fa5a3f4e13d7d85f12.jpg", "detect brain tumor"],
                    ["./examples/1550_jpg.rf.3d067be9580ec32dbee5a89c675d8459.jpg", "detect brain tumor"],
                    ["./examples/2256_jpg.rf.3afd7903eaf3f3c5aa8da4bbb928bc19.jpg", "detect brain tumor"],
                    ["./examples/2871_jpg.rf.3b6eadfbb369abc2b3bcb52b406b74f2.jpg", "detect brain tumor"],
                    ["./examples/2921_jpg.rf.3b952f91f27a6248091e7601c22323ad.jpg", "detect brain tumor"],
                    ]
        gr.Examples(
            examples=examples,
            inputs=[image, seg_input],
        )
        seg_inputs = [
            image,
            seg_input
            ]
        seg_outputs = [
            annotated_image
        ]
        seg_btn.click(
            fn=detect_brain_tumor,
            inputs=seg_inputs,
            outputs=seg_outputs,
        )

if __name__ == "__main__":
    demo.queue(max_size=10).launch(debug=True)