from inference_sdk import InferenceHTTPClient
from PIL import Image, ImageDraw
import gradio as gr
import tempfile
import json

# Initialize the Roboflow inference clients (one per hosted model endpoint).
ROOM_CLIENT = InferenceHTTPClient(api_url="https://outline.roboflow.com", api_key="qwnldFeqXRNGRd2T4vZy")
DOOR_WINDOW_CLIENT = InferenceHTTPClient(api_url="https://detect.roboflow.com", api_key="qwnldFeqXRNGRd2T4vZy")

# Options shared by the UI dropdowns below.
color_options = ["Red", "Green", "Blue", "Yellow"]
layer_options = ["Room Detection", "Doors and Windows Detection"]

def apply_zoom(image, zoom_factor):
    """Resize the image by the given zoom factor (e.g. 0.5 halves it, 2.0 doubles it)."""
    width, height = image.size
    new_width = int(width * zoom_factor)
    new_height = int(height * zoom_factor)
    return image.resize((new_width, new_height))
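
# Example (illustrative): apply_zoom(img, 0.5) turns an 800x600 image into 400x300.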

def detect_and_draw(image_path, model_id, client, filter_classes=None, color_choice=None):
    """Run inference with the given client/model and draw labelled boxes on a copy of the image.

    Returns the overlaid image and a dict of per-class detection counts.
    """
    result = client.infer(image_path, model_id=model_id)

    # Dump the raw inference result for debugging.
    with open("result.json", "w") as json_file:
        json.dump(result, json_file, indent=4)

    original_img = Image.open(image_path)
    overlayed_img = original_img.copy()
    draw = ImageDraw.Draw(overlayed_img)

    # Counts of all detected classes.
    counts = {}

    for prediction in result.get('predictions', []):
        pred_class = prediction.get('class', '').lower()

        # If filter_classes is provided, skip classes not in the list.
        if filter_classes and pred_class not in filter_classes:
            continue

        counts[pred_class] = counts.get(pred_class, 0) + 1

        # Roboflow returns the box centre; convert to a top-left corner.
        x = int(prediction['x'] - prediction['width'] / 2)
        y = int(prediction['y'] - prediction['height'] / 2)
        width = int(prediction['width'])
        height = int(prediction['height'])

        # Draw the bounding box and its class label.
        draw.rectangle([x, y, x + width, y + height], outline=color_choice, width=2)
        draw.text((x, y - 10), pred_class, fill=color_choice)

    return overlayed_img, counts
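
# For reference: the Roboflow detection endpoints are assumed to return a payload
# shaped roughly like the one below (centre-based boxes, as consumed above); any
# field not read by detect_and_draw is illustrative.
#
#   {"predictions": [{"x": 120.5, "y": 80.0, "width": 40.0, "height": 60.0,
#                     "class": "door", "confidence": 0.91}]}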

def process_floor_plan(image, zoom_factor, color_choice, selected_layers):
    """Zoom the uploaded image, run the selected detection layers, and return overlays plus counts."""
    try:
        # Save the uploaded image to a temporary file for the inference client.
        with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as temp_file:
            image.save(temp_file.name)
            temp_file_path = temp_file.name

        zoomed_image = apply_zoom(Image.open(temp_file_path), zoom_factor)
        zoomed_image.save(temp_file_path)

        outputs = []
        combined_counts = {}

        # Run inference only for the layers the user selected.
        if "Room Detection" in (selected_layers or []):
            room_overlay, room_counts = detect_and_draw(
                temp_file_path, "room-segmentation-frntt/1", ROOM_CLIENT, filter_classes=["room"], color_choice=color_choice
            )
            outputs.append(("Room Detection", room_overlay))
            combined_counts.update(room_counts)

        if "Doors and Windows Detection" in (selected_layers or []):
            dw_overlay, dw_counts = detect_and_draw(
                temp_file_path, "door-detection-model/2", DOOR_WINDOW_CLIENT, filter_classes=["door", "window"], color_choice=color_choice
            )
            outputs.append(("Doors and Windows Detection", dw_overlay))
            combined_counts.update(dw_counts)

        return [img for _, img in outputs], json.dumps(combined_counts, indent=4)

    except Exception as e:
        print(f"Error processing floor plan: {e}")
        return [], json.dumps({"error": str(e)}, indent=4)

# Simple two-point pixel-distance measurement helpers (not yet wired into the UI).
measurement_points = []

def add_measurement_point(x, y):
    """Record a point; once two points exist, print the straight-line distance between them."""
    global measurement_points
    measurement_points.append((x, y))
    if len(measurement_points) == 2:
        distance = ((measurement_points[1][0] - measurement_points[0][0])**2 +
                    (measurement_points[1][1] - measurement_points[0][1])**2)**0.5
        print(f"Distance between points: {distance:.2f} pixels")

def clear_measurements():
    """Reset the recorded measurement points."""
    global measurement_points
    measurement_points = []
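
# Example (illustrative): add_measurement_point(0, 0) followed by
# add_measurement_point(3, 4) prints "Distance between points: 5.00 pixels".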


class ActionManager:
    """Cycle through a fixed list of workflow actions, keeping a log for undo/redo."""

    def __init__(self, actions):
        self.actions = actions
        self.current_index = 0
        self.action_log = []

    def perform_action(self):
        """Perform the next action and log it."""
        self.current_index = (self.current_index + 1) % len(self.actions)
        action = self.actions[self.current_index]
        self.action_log.append(f"Performed: {action}")
        return action

    def undo_action(self):
        """Undo the last action and log it."""
        self.current_index = (self.current_index - 1) % len(self.actions)
        action = self.actions[self.current_index]
        self.action_log.append(f"Undone: {action}")
        return action

    def get_action_log(self):
        """Return a formatted log of actions."""
        return "\n".join(self.action_log)

action_manager = ActionManager(actions=["Upload", "Detect Rooms", "Detect Doors/Windows"])
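# Example (illustrative): starting at "Upload", perform_action() advances to
# "Detect Rooms", a second call to "Detect Doors/Windows", and undo_action()
# steps back to "Detect Rooms"; the index wraps around at either end.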

with gr.Blocks() as demo:
    gr.Markdown("# Advanced Floor Plan Detection")
    gr.Markdown("Upload a floor plan to detect rooms, doors, and windows. Choose detection layers and highlight them with your preferred color.")

    with gr.Row():
        image_input = gr.Image(type="pil", label="Upload Floor Plan")
        zoom_factor = gr.Slider(minimum=0.1, maximum=2.0, step=0.1, value=1.0, label="Zoom Factor")
        color_choice = gr.Dropdown(choices=color_options, label="Detection Color")
        selected_layers = gr.Dropdown(choices=layer_options, multiselect=True, label="Select Layers")

    with gr.Row():
        gallery_output = gr.Gallery(label="Detected Layers")
        detection_counts_output = gr.Text(label="Detection Counts (JSON)")

    with gr.Row():
        undo_button = gr.Button("Undo")
        redo_button = gr.Button("Redo")
        action_output = gr.Textbox(label="Current Action", value=action_manager.actions[action_manager.current_index], interactive=False)

    # Undo steps backward through the action list; redo steps forward.
    undo_button.click(action_manager.undo_action, inputs=[], outputs=action_output)
    redo_button.click(action_manager.perform_action, inputs=[], outputs=action_output)

    process_button = gr.Button("Process Floor Plan")
    process_button.click(
        process_floor_plan,
        inputs=[image_input, zoom_factor, color_choice, selected_layers],
        outputs=[gallery_output, detection_counts_output]
    )

    # Additional section: detect rooms and suggest layout improvements.
    with gr.Row():
        upload = gr.Image(type="pil", label="Upload Floor Plan")
        detect_button = gr.Button("Detect & Suggest Improvements")

    with gr.Row():
        detection_output = gr.Gallery(label="Room Detection Results")
        suggestion_output = gr.Textbox(label="Improvement Suggestions", lines=5)
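
    # Minimal wiring sketch (assumption: reuse the room model and derive one canned
    # suggestion per detected class; swap in a real suggestion model as needed).
    def detect_and_suggest(image):
        overlays, counts_json = process_floor_plan(image, 1.0, "Red", ["Room Detection"])
        counts = json.loads(counts_json)
        suggestions = "\n".join(f"Review the {name} layout ({n} detected)." for name, n in counts.items())
        return overlays, suggestions or "No rooms detected."

    detect_button.click(detect_and_suggest, inputs=upload, outputs=[detection_output, suggestion_output])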

if __name__ == "__main__":
    demo.launch(share=True)