import os
import time
import gradio as gr
from typing import *
from pillow_heif import register_heif_opener
register_heif_opener()
import vision_agent as va
from vision_agent.tools import register_tool
from vision_agent.tools import load_image, owl_v2, overlay_bounding_boxes, save_image
from huggingface_hub import login
import spaces
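# `spaces` is the Hugging Face Spaces helper package (ZeroGPU support); it is not
# referenced directly below, but is assumed to be expected by the Space runtime.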
# Log in to the Hugging Face Hub using the HF_TOKEN secret / environment variable
# (skip the login if the token is not set)
hf_token = os.getenv("HF_TOKEN")
if hf_token:
    login(token=hf_token, add_to_git_credential=True)

def detect_brain_tumor(image, debug: bool = False) -> str:
    """
    Detects brain tumors in the given image and saves the image with bounding boxes.

    Parameters:
        image: The input image (a NumPy array, as provided by Gradio).
        debug (bool): Flag to enable logging for debugging purposes.

    Returns:
        str: Path to the saved output image.
    """
    # Step 1: Generate a unique output filename and make sure the output directory exists
    os.makedirs("./output", exist_ok=True)
    output_path = f"./output/tumor_detection_{int(time.time())}.jpg"

    if debug:
        print("Image received")

    # Step 2: Detect brain tumors with OWLv2 open-vocabulary detection
    prompt = "detect brain tumor"
    detections = owl_v2(prompt, image)
    if debug:
        print(f"Detections: {detections}")

    # Step 3: Overlay bounding boxes on the image
    image_with_bboxes = overlay_bounding_boxes(image, detections)
    if debug:
        print("Bounding boxes overlaid on the image")

    # Step 4: Save the resulting image
    save_image(image_with_bboxes, output_path)
    if debug:
        print(f"Image saved to {output_path}")

    return output_path
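# Hypothetical local smoke test (not run by the Space); assumes the bundled example
# image below exists and that the vision_agent models are reachable:
#   import numpy as np
#   from PIL import Image
#   img = np.array(Image.open("./examples/194_jpg.rf.3e3dd592d034bb5ee27a978553819f42.jpg"))
#   print(detect_brain_tumor(img, debug=True))
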
INTRO_TEXT = "# 🔬🧠 CellVision AI -- Intelligent Cell Imaging Analysis 🤖🧫"
IMAGE_PROMPT = "Are these cells healthy or cancerous?"
with gr.Blocks(css="style.css") as demo:
    gr.Markdown(INTRO_TEXT)

    with gr.Tab("Segment/Detect"):
        with gr.Row():
            with gr.Column():
                image = gr.Image(type="numpy")
                seg_input = gr.Text(label="Entities to Segment/Detect")
            with gr.Column():
                # detect_brain_tumor returns a file path, so a plain Image output is used here
                annotated_image = gr.Image(label="Output")
        seg_btn = gr.Button("Submit")
        examples = [
            ["./examples/194_jpg.rf.3e3dd592d034bb5ee27a978553819f42.jpg", "detect brain tumor"],
            ["./examples/239_jpg.rf.3dcc0799277fb78a2ab21db7761ccaeb.jpg", "detect brain tumor"],
            ["./examples/1385_jpg.rf.3c67cb92e2922dba0e6dba86f69df40b.jpg", "detect brain tumor"],
            ["./examples/1491_jpg.rf.3c658e83538de0fa5a3f4e13d7d85f12.jpg", "detect brain tumor"],
            ["./examples/1550_jpg.rf.3d067be9580ec32dbee5a89c675d8459.jpg", "detect brain tumor"],
            ["./examples/2256_jpg.rf.3afd7903eaf3f3c5aa8da4bbb928bc19.jpg", "detect brain tumor"],
            ["./examples/2871_jpg.rf.3b6eadfbb369abc2b3bcb52b406b74f2.jpg", "detect brain tumor"],
            ["./examples/2921_jpg.rf.3b952f91f27a6248091e7601c22323ad.jpg", "detect brain tumor"],
        ]
        gr.Examples(
            examples=examples,
            inputs=[image, seg_input],
        )

        seg_inputs = [image, seg_input]
        seg_outputs = [annotated_image]

        seg_btn.click(
            fn=detect_brain_tumor,
            inputs=seg_inputs,
            outputs=seg_outputs,
        )

if __name__ == "__main__":
    demo.queue(max_size=10).launch(debug=True)