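"""Streamlit app for building-footprint segmentation and area estimation.

A user uploads a satellite image (PNG/JPEG or GeoTIFF); the image is segmented
with a U-Net variant using CBAM attention (`reunet_cbam`), large images are
processed in 512x512 patches, and the uploaded image and predicted mask are
archived in a Hugging Face Space repository and logged to a CSV file.
"""
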
import streamlit as st
import sys
import os
import shutil
import time
from datetime import datetime
import csv

import cv2
import numpy as np
from PIL import Image
import torch
from huggingface_hub import HfApi, hf_hub_download, login

sys.path.append('Utils')
sys.path.append('model')

from model.CBAM.reunet_cbam import reunet_cbam
from model.transform import transforms
from model.unet import UNET
from Utils.area import pixel_to_sqft, process_and_overlay_image
from Utils.convert import read_pansharpened_rgb
from clean_refine import clean_mask, refine_mask

# Set up Hugging Face authentication
HF_TOKEN = os.environ.get("HF_TOKEN")
if not HF_TOKEN:
    raise ValueError("HF_TOKEN environment variable is not set")
login(token=HF_TOKEN)

hf_api = HfApi()
REPO_ID = "Pavan2k4/Building_area"
REPO_TYPE = "space"

@st.cache_resource
def load_model():
    # Load the segmentation model once and cache it across Streamlit reruns
    model = reunet_cbam()
    model.load_state_dict(torch.load('latest.pth', map_location='cpu')['model_state_dict'])
    model.eval()
    return model

# Upload a local file into the Hugging Face Space repository
def save_to_hf_repo(local_path, repo_path):
    try:
        hf_api.upload_file(
            path_or_fileobj=local_path,
            path_in_repo=repo_path,
            repo_id=REPO_ID,
            repo_type=REPO_TYPE,
            token=HF_TOKEN
        )
        st.success(f"File uploaded successfully to {repo_path}")
    except Exception as e:
        st.error(f"Error uploading file: {str(e)}")
        st.error("Detailed error information:")
        st.exception(e)

BASE_DIR = os.getcwd()

# Define subdirectories
UPLOAD_DIR = os.path.join(BASE_DIR, "uploaded_images")
MASK_DIR = os.path.join(BASE_DIR, "generated_masks")
PATCHES_DIR = os.path.join(BASE_DIR, "patches")
PRED_PATCHES_DIR = os.path.join(BASE_DIR, "pred_patches")
CSV_LOG_PATH = os.path.join(BASE_DIR, "image_log.csv")

# Create directories
for directory in [UPLOAD_DIR, MASK_DIR, PATCHES_DIR, PRED_PATCHES_DIR]:
    os.makedirs(directory, exist_ok=True)

# Load model
model = load_model()

def predict(image):
    with torch.no_grad():
        output = model(image.unsqueeze(0))
    return output.squeeze().cpu().numpy()
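
# Note: predict() returns the raw model output as a NumPy array; callers below
# binarize it with a 0.5 threshold to obtain a building mask.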

def split_image(image, patch_size=512):
    h, w, _ = image.shape
    patches = []
    for y in range(0, h, patch_size):
        for x in range(0, w, patch_size):
            patch = image[y:min(y + patch_size, h), x:min(x + patch_size, w)]
            patches.append((f"patch_{y}_{x}.png", patch))
    return patches
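
# Patch filenames encode the patch's top-left corner as patch_{y}_{x}.png.
# For example, a 1024x1536 image yields six patches: patch_0_0.png,
# patch_0_512.png, patch_0_1024.png, patch_512_0.png, patch_512_512.png and
# patch_512_1024.png. merge() below relies on this naming to place each patch.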

def merge(patch_folder, dest_image='out.png', image_shape=None):
    merged = np.zeros(image_shape[:-1] + (3,), dtype=np.uint8)
    for filename in os.listdir(patch_folder):
        if filename.endswith(".png"):
            patch_path = os.path.join(patch_folder, filename)
            patch = cv2.imread(patch_path)
            patch_height, patch_width, _ = patch.shape

            # Extract patch coordinates from the filename (format: patch_{y}_{x}.png)
            parts = filename.split("_")
            x, y = None, None
            for part in parts:
                if part.endswith(".png"):
                    x = int(part.split(".")[0])
                elif part.isdigit():
                    y = int(part)
            if x is None or y is None:
                raise ValueError(f"Invalid filename: {filename}")

            # Clamp the patch position so it stays within the image boundaries
            if x + patch_width > image_shape[1]:
                x = image_shape[1] - patch_width
            if y + patch_height > image_shape[0]:
                y = image_shape[0] - patch_height

            # Merge the patch into the full-size image
            merged[y:y + patch_height, x:x + patch_width, :] = patch

    cv2.imwrite(dest_image, merged)
    return merged
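
# Note: merge() places every *.png it finds in patch_folder, so the folder
# should only contain patches from the current image (see the cleanup step in
# process_large_image below).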

def process_large_image(model, image_path, patch_size=512):
    # Read the image
    img = cv2.imread(image_path)
    if img is None:
        raise ValueError(f"Failed to read image from {image_path}")
    h, w, _ = img.shape
    st.write(f"Processing image of size {w}x{h}")

    # Remove mask patches left over from a previous run so that merge() only
    # sees patches belonging to the current image
    for leftover in os.listdir(PRED_PATCHES_DIR):
        os.remove(os.path.join(PRED_PATCHES_DIR, leftover))

    # Split the image into patches
    patches = split_image(img, patch_size)

    # Predict a mask for each patch
    for filename, patch in patches:
        patch_pil = Image.fromarray(cv2.cvtColor(patch, cv2.COLOR_BGR2RGB))
        patch_transformed = transforms(patch_pil)
        prediction = predict(patch_transformed)
        mask = (prediction > 0.5).astype(np.uint8) * 255

        # Save the mask patch
        mask_filepath = os.path.join(PRED_PATCHES_DIR, filename)
        cv2.imwrite(mask_filepath, mask)

    # Merge the predicted patches into a full-size mask
    merged_mask = merge(PRED_PATCHES_DIR, dest_image='merged_mask.png', image_shape=img.shape)
    return merged_mask
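
# Note: the merged mask comes back as a 3-channel BGR array (merge() allocates
# three channels and cv2.imread loads the saved grayscale patches as BGR),
# whereas the whole-image path in upload_page() produces a single-channel mask;
# clean_mask/refine_mask are assumed to accept either form.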

def log_image_details(image_id, image_filename, mask_filename):
    file_exists = os.path.exists(CSV_LOG_PATH)

    current_time = datetime.now()
    date = current_time.strftime('%Y-%m-%d')
    time_str = current_time.strftime('%H:%M:%S')

    # Determine the next serial number (header row + existing entries)
    if file_exists:
        with open(CSV_LOG_PATH, mode='r') as f:
            sno = sum(1 for _ in csv.reader(f))
    else:
        sno = 1

    with open(CSV_LOG_PATH, mode='a', newline='') as file:
        writer = csv.writer(file)
        if not file_exists:
            writer.writerow(['S.No', 'Date', 'Time', 'Image ID', 'Image Filename', 'Mask Filename'])
        writer.writerow([sno, date, time_str, image_id, image_filename, mask_filename])
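
# The CSV log grows by one row per processed image, for example (illustrative values):
#   S.No,Date,Time,Image ID,Image Filename,Mask Filename
#   1,2025-01-01,10:15:30,1735726530,image_1735726530.png,mask_1735726530.png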

def upload_page():
    if 'file_uploaded' not in st.session_state:
        st.session_state.file_uploaded = False
    if 'filename' not in st.session_state:
        st.session_state.filename = None
    if 'mask_filename' not in st.session_state:
        st.session_state.mask_filename = None

    image = st.file_uploader('Choose a satellite image', type=['jpg', 'png', 'jpeg', 'tiff', 'tif'])

    if image is not None and not st.session_state.file_uploaded:
        try:
            bytes_data = image.getvalue()
            timestamp = int(time.time())
            original_filename = image.name
            file_extension = os.path.splitext(original_filename)[1].lower()

            if file_extension in ['.tiff', '.tif']:
                filename = f"image_{timestamp}.tif"
                converted_filename = f"image_{timestamp}_converted.png"
            else:
                filename = f"image_{timestamp}.png"
                converted_filename = filename

            filepath = os.path.join(UPLOAD_DIR, filename)
            converted_filepath = os.path.join(UPLOAD_DIR, converted_filename)

            with open(filepath, "wb") as f:
                f.write(bytes_data)

            # If the upload is a GeoTIFF, convert it to an 8-bit RGB PNG first
            if file_extension in ['.tiff', '.tif']:
                st.info('Processing GeoTIFF image...')
                rgb_image = read_pansharpened_rgb(filepath)
                cv2.imwrite(converted_filepath, cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR))
                st.success(f'GeoTIFF converted to 8-bit image and saved as {converted_filename}')
                img = Image.open(converted_filepath)
            else:
                img = Image.open(filepath)

            # Save the (converted) image to the Hugging Face repo; done after the
            # conversion so the PNG actually exists for GeoTIFF uploads
            try:
                image_repo_path = f"images/{converted_filename}"
                save_to_hf_repo(converted_filepath, image_repo_path)
            except Exception as e:
                st.error(f"Error saving image to Hugging Face repo: {str(e)}")

            st.image(img, caption='Uploaded Image', use_column_width=True)
            st.success(f'Image processed and saved as {converted_filename}')

            # Store the filename of the converted image
            st.session_state.filename = converted_filename

            # Convert image to numpy array
            img_array = np.array(img)

            # Images larger than 650x650 are processed patch by patch
            if img_array.shape[0] > 650 or img_array.shape[1] > 650:
                st.info('Large image detected. Using patch-based processing.')
                with st.spinner('Analyzing large image...'):
                    full_mask = process_large_image(model, converted_filepath)
            else:
                st.info('Small image detected. Processing whole image at once.')
                with st.spinner('Analyzing image...'):
                    img_transformed = transforms(img)
                    prediction = predict(img_transformed)
                    full_mask = (prediction > 0.5).astype(np.uint8) * 255

            # Clean and refine the predicted mask
            full_mask = clean_mask(full_mask, morph_kernel_size=3, min_object_size=50)
            full_mask = refine_mask(full_mask, blur_kernel=5, edge_kernel=3, threshold_value=127)

            # Save the full mask
            mask_filename = f"mask_{timestamp}.png"
            mask_filepath = os.path.join(MASK_DIR, mask_filename)
            cv2.imwrite(mask_filepath, full_mask)
            st.session_state.mask_filename = mask_filename

            # Save mask to Hugging Face repo
            try:
                mask_repo_path = f"masks/{mask_filename}"
                save_to_hf_repo(mask_filepath, mask_repo_path)
            except Exception as e:
                st.error(f"Error saving mask to Hugging Face repo: {str(e)}")

            # Log image details
            log_image_details(timestamp, converted_filename, mask_filename)

            st.session_state.file_uploaded = True
            st.success("Image processed successfully")
        except Exception as e:
            st.error(f"An error occurred: {str(e)}")
            st.error("Please check the logs for more details.")
            print(f"Error in upload_page: {str(e)}")  # Appears in the Streamlit logs

    if st.session_state.file_uploaded and st.button('View result'):
        if st.session_state.filename is None:
            st.error("Please upload an image before viewing the result.")
        else:
            st.success('Image analyzed')
            st.session_state.page = 'result'
            st.rerun()

def result_page():
    st.title('Analysis Result')

    if 'filename' not in st.session_state or 'mask_filename' not in st.session_state:
        st.error("No image or mask file found. Please upload and process an image first.")
        if st.button('Back to Upload'):
            st.session_state.page = 'upload'
            st.session_state.file_uploaded = False
            st.session_state.filename = None
            st.session_state.mask_filename = None
            st.rerun()
        return

    col1, col2 = st.columns(2)

    # Display original image
    original_img_path = os.path.join(UPLOAD_DIR, st.session_state.filename)
    if os.path.exists(original_img_path):
        original_img = Image.open(original_img_path)
        col1.image(original_img, caption='Original Image', use_column_width=True)
    else:
        col1.error(f"Original image file not found: {original_img_path}")

    # Display predicted mask
    mask_path = os.path.join(MASK_DIR, st.session_state.mask_filename)
    if os.path.exists(mask_path):
        mask = Image.open(mask_path)
        col2.image(mask, caption='Predicted Mask', use_column_width=True)
    else:
        col2.error(f"Predicted mask file not found: {mask_path}")

    st.subheader("Overlay with Area of Buildings (sqft)")

    # Display overlaid image
    if os.path.exists(original_img_path) and os.path.exists(mask_path):
        original_np = cv2.imread(original_img_path)
        mask_np = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)

        # Ensure mask is binary
        _, mask_np = cv2.threshold(mask_np, 127, 255, cv2.THRESH_BINARY)

        # Resize mask to match original image size if necessary
        if original_np.shape[:2] != mask_np.shape[:2]:
            mask_np = cv2.resize(mask_np, (original_np.shape[1], original_np.shape[0]))

        # Process and overlay image
        overlay_img = process_and_overlay_image(original_np, mask_np, 'output.png')
        st.image(overlay_img, caption='Overlay Image', use_column_width=True)
    else:
        st.error("Image or mask file not found for overlay.")

    if st.button('Back to Upload'):
        st.session_state.page = 'upload'
        st.session_state.file_uploaded = False
        st.session_state.filename = None
        st.session_state.mask_filename = None
        st.rerun()

def main():
    st.title('Building area estimation')

    if 'page' not in st.session_state:
        st.session_state.page = 'upload'

    if st.session_state.page == 'upload':
        upload_page()
    elif st.session_state.page == 'result':
        result_page()


if __name__ == '__main__':
    main()
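
# To try this locally (assuming the checkpoint 'latest.pth', the Utils/ and
# model/ packages, and clean_refine.py sit alongside this file, saved here as
# the hypothetical app.py), run on a POSIX shell:
#   HF_TOKEN=<your token> streamlit run app.py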