import os
import logging
import sys
def get_logging_level(default_level=logging.INFO):
    """Resolve the logging level from the VISION_AGENT_LOG_LEVEL env var.

    Parameters:
        default_level: Level constant returned when the variable is unset
            or holds an unrecognized value (defaults to logging.INFO).

    Returns:
        int: One of the ``logging`` level constants.
    """
    # Dict lookup replaces the if/elif ladder; unknown names fall back
    # to default_level via dict.get.
    level_names = {
        'DEBUG': logging.DEBUG,
        'INFO': logging.INFO,
        'WARNING': logging.WARNING,
        'ERROR': logging.ERROR,
        'CRITICAL': logging.CRITICAL,
    }
    # Comparison is case-insensitive: the env value is upper-cased first.
    log_level_str = os.getenv('VISION_AGENT_LOG_LEVEL', '').upper()
    return level_names.get(log_level_str, default_level)
# Set up logging before importing the library so that vision-agent's own
# loggers inherit the configured level on import.
logging_level = get_logging_level()
# Log to stdout — presumably so Hugging Face Spaces captures the output
# alongside print(); verify against the deployment environment.
logging.basicConfig(stream=sys.stdout, level=logging_level)
_LOGGER = logging.getLogger(__name__)
# Explicitly set logging level for the vision-agent library
vision_agent_logger = logging.getLogger('vision_agent')
vision_agent_logger.setLevel(logging_level)
# Print the logging level to verify it's set correctly
print(f"Logging level set to: {logging.getLevelName(logging_level)}")
from huggingface_hub import login
import time
import gradio as gr
from typing import *
from pillow_heif import register_heif_opener

# Enable PIL to open HEIF/HEIC images (e.g. iPhone uploads) through Gradio.
register_heif_opener()

import vision_agent as va
from vision_agent.tools import register_tool, load_image, owl_v2, overlay_bounding_boxes, save_image

# Perform login using the token. Only attempt the login when HF_TOKEN is
# actually set: login(token=None) tries an interactive prompt and crashes
# a headless Space with a runtime error.
hf_token = os.getenv("HF_TOKEN")
if hf_token:
    login(token=hf_token, add_to_git_credential=True)
else:
    _LOGGER.warning("HF_TOKEN is not set; skipping Hugging Face login.")

import numpy as np
from PIL import Image
def detect_brain_tumor(image, seg_input, debug: bool = True):
    """
    Detects a brain tumor in the given image and returns the annotated image.

    Parameters:
        image: The input image (as numpy array provided by Gradio).
        seg_input: The segmentation input (not used in this function, but
            required so the signature matches the Gradio inputs list).
        debug (bool): Flag to enable logging for debugging purposes.

    Returns:
        tuple: (numpy array of image, list of ((x1, y1, x2, y2), label) tuples)
            in the format gr.AnnotatedImage expects.
    """
    if debug:
        _LOGGER.debug(f"Image received, shape: {image.shape}")

    # Step 2: Detect brain tumor using owl_v2 (open-vocabulary detector
    # driven by a text prompt).
    prompt = "detect brain tumor"
    detections = owl_v2(prompt, image)
    if debug:
        _LOGGER.debug(f"Raw detections: {detections}")

    # Step 3: Overlay bounding boxes on the image
    image_with_bboxes = overlay_bounding_boxes(image, detections)
    if debug:
        _LOGGER.debug("Bounding boxes overlaid on the image")

    # Prepare annotations for AnnotatedImage output. The bbox values are
    # scaled by the image dimensions, i.e. they are assumed to be
    # normalized to [0, 1]. Hoist the shape lookup out of the loop — it is
    # invariant across detections.
    height, width = image.shape[:2]
    annotations = []
    for detection in detections:
        label = detection['label']
        score = detection['score']
        x1, y1, x2, y2 = detection['bbox']
        # Convert normalized coordinates to pixel coordinates.
        pixel_box = (int(x1 * width), int(y1 * height),
                     int(x2 * width), int(y2 * height))
        annotations.append((pixel_box, f"{label} {score:.2f}"))
    if debug:
        _LOGGER.debug(f"Annotations: {annotations}")

    # gr.AnnotatedImage needs a numpy array; convert if the overlay helper
    # returned a PIL Image.
    if isinstance(image_with_bboxes, Image.Image):
        image_with_bboxes = np.array(image_with_bboxes)

    return (image_with_bboxes, annotations)
# Banner markdown shown at the top of the demo page.
INTRO_TEXT="# 🔬🧠 OmniScience -- Agentic Imaging Analysis 🤖🧫"
# Build the Gradio UI: one tab with an image + prompt input column and an
# annotated-image output column. Styling comes from the local style.css.
with gr.Blocks(css="style.css") as demo:
    gr.Markdown(INTRO_TEXT)
    with gr.Tab("Segment/Detect"):
        with gr.Row():
            with gr.Column():
                # Input side: numpy image plus the text prompt (the prompt
                # is passed through to detect_brain_tumor as seg_input).
                image = gr.Image(type="numpy")
                seg_input = gr.Text(label="Entities to Segment/Detect", value="detect brain tumor")
            with gr.Column():
                # Output side: image with labeled bounding-box annotations.
                annotated_image = gr.AnnotatedImage(label="Output")
        seg_btn = gr.Button("Submit")
        # Clickable example images bundled with the Space.
        examples = [["./examples/194_jpg.rf.3e3dd592d034bb5ee27a978553819f42.jpg", "detect brain tumor"],
                    ["./examples/239_jpg.rf.3dcc0799277fb78a2ab21db7761ccaeb.jpg", "detect brain tumor"],
                    ["./examples/1385_jpg.rf.3c67cb92e2922dba0e6dba86f69df40b.jpg", "detect brain tumor"],
                    ["./examples/1491_jpg.rf.3c658e83538de0fa5a3f4e13d7d85f12.jpg", "detect brain tumor"],
                    ["./examples/1550_jpg.rf.3d067be9580ec32dbee5a89c675d8459.jpg", "detect brain tumor"],
                    ["./examples/2256_jpg.rf.3afd7903eaf3f3c5aa8da4bbb928bc19.jpg", "detect brain tumor"],
                    ["./examples/2871_jpg.rf.3b6eadfbb369abc2b3bcb52b406b74f2.jpg", "detect brain tumor"],
                    ["./examples/2921_jpg.rf.3b952f91f27a6248091e7601c22323ad.jpg", "detect brain tumor"],
                    ]
        gr.Examples(
            examples=examples,
            inputs=[image, seg_input],
        )
        seg_inputs = [
            image,
            seg_input
        ]
        seg_outputs = [
            annotated_image
        ]
        # Wire the button to the detector: inputs map positionally onto
        # detect_brain_tumor(image, seg_input).
        seg_btn.click(
            fn=detect_brain_tumor,
            inputs=seg_inputs,
            outputs=seg_outputs,
        )
# Launch the app with a bounded request queue when run as a script.
if __name__ == "__main__":
    demo.queue(max_size=10).launch(debug=True)