import os
import time
import gradio as gr
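# pillow_heif registers a HEIF/HEIC decoder with Pillow so that Image.open can
# also read .heic/.heif files (common for phone-camera exports).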
from pillow_heif import register_heif_opener
register_heif_opener()
from PIL import Image
import numpy as np
from vision_agent.tools import owl_v2, overlay_bounding_boxes, save_image
from huggingface_hub import login
import spaces
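# `spaces` is the Hugging Face Spaces helper package; on ZeroGPU hardware it
# provides the @spaces.GPU decorator for GPU-bound functions.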
# Log in to the Hugging Face Hub using the HF_TOKEN secret (if one is configured)
hf_token = os.getenv("HF_TOKEN")
if hf_token:
    login(token=hf_token, add_to_git_credential=True)
def detect_brain_tumor(image, debug: bool = False) -> str:
    """
    Detects a brain tumor in the given image and saves the image with bounding boxes.

    Parameters:
        image: The input image (PIL Image, numpy array, or file path).
        debug (bool): Flag to enable logging for debugging purposes.

    Returns:
        str: Path to the saved output image.
    """
    # Make sure the output directory exists, then build a unique output filename
    os.makedirs("./output", exist_ok=True)
    output_path = f"./output/tumor_detection_{int(time.time())}.jpg"

    # Step 1: Normalize the input to a PIL Image
    if isinstance(image, str):
        # image is a file path
        image = Image.open(image)
    elif isinstance(image, np.ndarray):
        # image is already a numpy array
        image = Image.fromarray(image)
    elif not isinstance(image, Image.Image):
        raise ValueError("Unsupported image type. Please provide a PIL Image, numpy array, or file path.")

    # Convert to RGB if needed, then to a numpy array for owl_v2
    image = image.convert("RGB")
    image_array = np.array(image)
    if debug:
        print(f"Image loaded and converted to numpy array of shape {image_array.shape}")

    # Step 2: Detect brain tumor using owl_v2
    prompt = "detect brain tumor"
    detections = owl_v2(prompt, image_array)
    if debug:
        print(f"Detections: {detections}")

    # Step 3: Overlay bounding boxes (pass the numpy array, matching owl_v2's input)
    image_with_bboxes = overlay_bounding_boxes(image_array, detections)
    if debug:
        print("Bounding boxes overlaid on the image")

    # Step 4: Save the resulting image
    save_image(image_with_bboxes, output_path)
    if debug:
        print(f"Image saved to {output_path}")

    return output_path
# Example usage (uncomment to run):
# detect_brain_tumor("./examples/1385_jpg.rf.3c67cb92e2922dba0e6dba86f69df40b.jpg", debug=True)
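# A minimal sketch of programmatic use with the numpy-array branch, assuming the
# example file exists locally and the owl_v2 tool is reachable (variable names
# `arr` and `out` are hypothetical):
# arr = np.array(Image.open("./examples/194_jpg.rf.3e3dd592d034bb5ee27a978553819f42.jpg"))
# out = detect_brain_tumor(arr, debug=True)
# print(out)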
#########
INTRO_TEXT = "# 🔬🧠 CellVision AI -- Intelligent Cell Imaging Analysis 🤖🧫"
IMAGE_PROMPT = "Are these cells healthy or cancerous?"
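# Note: IMAGE_PROMPT is declared for the UI copy but is not wired into
# detect_brain_tumor, which uses its own fixed "detect brain tumor" prompt.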
with gr.Blocks(css="style.css") as demo:
    gr.Markdown(INTRO_TEXT)

    with gr.Tab("Agentic Detection"):
        with gr.Row():
            with gr.Column():
                image = gr.Image(type="pil")
            with gr.Column():
                text_input = gr.Text(label="Input Text")
                text_output = gr.Text(label="Text Output")
        chat_btn = gr.Button("Detect")

        chat_inputs = [image]
        chat_outputs = [text_output]
        chat_btn.click(
            fn=detect_brain_tumor,
            inputs=chat_inputs,
            outputs=chat_outputs,
        )

        # Each example row must match chat_inputs (a single image component),
        # so every row lists only the input path.
        examples = [
            ["./examples/194_jpg.rf.3e3dd592d034bb5ee27a978553819f42.jpg"],
            ["./examples/239_jpg.rf.3dcc0799277fb78a2ab21db7761ccaeb.jpg"],
            ["./examples/1385_jpg.rf.3c67cb92e2922dba0e6dba86f69df40b.jpg"],
            ["./examples/1491_jpg.rf.3c658e83538de0fa5a3f4e13d7d85f12.jpg"],
            ["./examples/1550_jpg.rf.3d067be9580ec32dbee5a89c675d8459.jpg"],
            ["./examples/2256_jpg.rf.3afd7903eaf3f3c5aa8da4bbb928bc19.jpg"],
            ["./examples/2871_jpg.rf.3b6eadfbb369abc2b3bcb52b406b74f2.jpg"],
            ["./examples/2921_jpg.rf.3b952f91f27a6248091e7601c22323ad.jpg"],
        ]
        gr.Examples(
            examples=examples,
            inputs=chat_inputs,
        )
#########
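# Entry point: queue(max_size=10) caps pending requests; launch(debug=True)
# surfaces tracebacks in the Space logs.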
if __name__ == "__main__":
    demo.queue(max_size=10).launch(debug=True)