from inference_sdk import InferenceHTTPClient
from PIL import Image, ImageDraw
import gradio as gr
import tempfile
import json

# Initialize Roboflow clients (replace the api_key with your own Roboflow key)
ROOM_CLIENT = InferenceHTTPClient(api_url="https://outline.roboflow.com", api_key="qwnldFeqXRNGRd2T4vZy")
DOOR_WINDOW_CLIENT = InferenceHTTPClient(api_url="https://detect.roboflow.com", api_key="qwnldFeqXRNGRd2T4vZy")

color_options = ["Red", "Green", "Blue", "Yellow"]
layer_options = ["Room Detection", "Doors and Windows Detection"]

def apply_zoom(image, zoom_factor):
    width, height = image.size
    new_width = int(width * zoom_factor)
    new_height = int(height * zoom_factor)
    return image.resize((new_width, new_height))
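
# Illustrative check of apply_zoom (example values, not app output):
# a 200x100 image zoomed by 1.5 becomes 300x150.
#   apply_zoom(Image.new("RGB", (200, 100)), 1.5).size  # -> (300, 150)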

def detect_and_draw(image_path, model_id, client, filter_classes=None, color_choice=None):
    result = client.infer(image_path, model_id=model_id)
    with open("result.json", "w") as json_file:
        json.dump(result, json_file, indent=4)

    original_img = Image.open(image_path)
    overlayed_img = original_img.copy()
    draw = ImageDraw.Draw(overlayed_img)

    # Dictionary to hold counts of all detected classes
    counts = {}
    for prediction in result.get('predictions', []):
        # Extract the class name
        pred_class = prediction.get('class', '').lower()
        # If filter_classes is provided, skip classes not in the list
        if filter_classes and pred_class not in filter_classes:
            continue
        # Increment the count for the class
        counts[pred_class] = counts.get(pred_class, 0) + 1
        # Convert the predicted center point to a top-left corner
        x = int(prediction['x'] - prediction['width'] / 2)
        y = int(prediction['y'] - prediction['height'] / 2)
        width = int(prediction['width'])
        height = int(prediction['height'])
        # Draw the bounding box and its label
        draw.rectangle([x, y, x + width, y + height], outline=color_choice, width=2)
        draw.text((x, y - 10), pred_class, fill=color_choice)
    return overlayed_img, counts
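
# detect_and_draw assumes the usual Roboflow detection response shape, where
# each prediction gives the box *center* plus width/height. Illustrative only:
#
#   {"predictions": [{"x": 320.5, "y": 210.0, "width": 80.0, "height": 120.0,
#                     "class": "door", "confidence": 0.91}]}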

def process_floor_plan(image, zoom_factor, color_choice, selected_layers):
    try:
        # Save the uploaded image to a temporary file
        with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as temp_file:
            image.save(temp_file.name)
            temp_file_path = temp_file.name

        zoomed_image = apply_zoom(Image.open(temp_file_path), zoom_factor)
        zoomed_image.save(temp_file_path)

        outputs = []
        combined_counts = {}
        selected_layers = selected_layers or []

        # Run only the layers the user selected, so each Roboflow model is
        # invoked at most once per request.
        if "Room Detection" in selected_layers:
            room_overlay, room_counts = detect_and_draw(
                temp_file_path, "room-segmentation-frntt/1", ROOM_CLIENT,
                filter_classes=["room"], color_choice=color_choice
            )
            outputs.append(("Room Detection", room_overlay))
            combined_counts.update(room_counts)

        if "Doors and Windows Detection" in selected_layers:
            dw_overlay, dw_counts = detect_and_draw(
                temp_file_path, "door-detection-model/2", DOOR_WINDOW_CLIENT,
                filter_classes=["door", "window"], color_choice=color_choice
            )
            outputs.append(("Doors and Windows Detection", dw_overlay))
            combined_counts.update(dw_counts)

        return [img[1] for img in outputs], json.dumps(combined_counts, indent=4)
    except Exception as e:
        print(f"Error processing floor plan: {e}")
        return [], json.dumps({"error": str(e)}, indent=4)
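
# Illustrative direct call, outside the Gradio UI (the filename
# "floor_plan.png" is hypothetical):
#   images, counts_json = process_floor_plan(
#       Image.open("floor_plan.png"), 1.0, "Red", ["Room Detection"]
#   )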

# Simple two-point pixel measurement helpers (not wired into the UI below yet)
measurement_points = []

def add_measurement_point(x, y):
    global measurement_points
    measurement_points.append((x, y))
    if len(measurement_points) == 2:
        # Euclidean distance between the two recorded points
        distance = ((measurement_points[1][0] - measurement_points[0][0])**2 +
                    (measurement_points[1][1] - measurement_points[0][1])**2)**0.5
        print(f"Distance between points: {distance:.2f} pixels")

def clear_measurements():
    global measurement_points
    measurement_points = []
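
# Illustrative use of the measurement helpers:
#   add_measurement_point(0, 0)
#   add_measurement_point(100, 0)  # prints "Distance between points: 100.00 pixels"
#   clear_measurements()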

class ActionManager:
    """Cycle through a fixed list of actions; undo and redo wrap around."""

    def __init__(self, actions):
        self.actions = actions
        self.current_index = 0
        self.action_log = []

    def perform_action(self):
        """Advance to the next action and log it."""
        self.current_index = (self.current_index + 1) % len(self.actions)
        action = self.actions[self.current_index]
        self.action_log.append(f"Performed: {action}")
        return action

    def undo_action(self):
        """Step back to the previous action and log it."""
        self.current_index = (self.current_index - 1) % len(self.actions)
        action = self.actions[self.current_index]
        self.action_log.append(f"Undone: {action}")
        return action

    def get_action_log(self):
        """Return a formatted log of actions."""
        return "\n".join(self.action_log)

action_manager = ActionManager(actions=["Upload", "Detect Rooms", "Detect Doors/Windows"])
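
# Illustrative cycle (starting at "Upload"):
#   action_manager.perform_action()  # -> "Detect Rooms"
#   action_manager.undo_action()     # -> "Upload"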

with gr.Blocks() as demo:
    gr.Markdown("# Advanced Floor Plan Detection")
    gr.Markdown("Upload a floor plan to detect rooms, doors, and windows. Choose detection layers and highlight them with your preferred color.")

    with gr.Row():
        image_input = gr.Image(type="pil", label="Upload Floor Plan")
        zoom_factor = gr.Slider(minimum=0.1, maximum=2.0, step=0.1, value=1.0, label="Zoom Factor")
        color_choice = gr.Dropdown(choices=color_options, label="Detection Color")
        selected_layers = gr.Dropdown(choices=layer_options, multiselect=True, label="Select Layers")

    with gr.Row():
        gallery_output = gr.Gallery(label="Detected Layers")
        detection_counts_output = gr.Text(label="Detection Counts (JSON)")

    with gr.Row():
        undo_button = gr.Button("Undo")
        redo_button = gr.Button("Redo")
        action_output = gr.Textbox(label="Current Action", value=action_manager.actions[action_manager.current_index], interactive=False)

    def handle_action(action_type):
        if action_type == "undo":
            return action_manager.undo_action()
        elif action_type == "redo":
            return action_manager.perform_action()

    undo_button.click(
        lambda: handle_action("undo"),
        inputs=[],
        outputs=action_output
    )
    redo_button.click(
        lambda: handle_action("redo"),
        inputs=[],
        outputs=action_output
    )

    process_button = gr.Button("Process Floor Plan")
    process_button.click(
        process_floor_plan,
        inputs=[image_input, zoom_factor, color_choice, selected_layers],
        outputs=[gallery_output, detection_counts_output]
    )

    # Placeholder section for a future "improvement suggestions" feature;
    # detect_button has no click handler wired up yet.
    with gr.Row():
        upload = gr.Image(type="pil", label="Upload Floor Plan")
        detect_button = gr.Button("Detect & Suggest Improvements")
    with gr.Row():
        detection_output = gr.Gallery(label="Room Detection Results")
        suggestion_output = gr.Textbox(label="Improvement Suggestions", lines=5)

# share=True exposes the app via a temporary public Gradio link
demo.launch(share=True)