Spaces:
Sleeping
Sleeping
File size: 12,990 Bytes
35d85a5 99c5651 950ad22 980e49a ebff05b 35d85a5 ebff05b bc563e5 99c5651 9a4f3b6 980e49a 83f3406 d771e6b bc563e5 3c2a16e c968130 3c2a16e c968130 3c2a16e c968130 bc563e5 35d85a5 359a4fd 99c5651 35d85a5 950ad22 980e49a 950ad22 980e49a 950ad22 35d85a5 8058707 35d85a5 8058707 35d85a5 950ad22 35d85a5 8058707 35d85a5 8058707 83f3406 d771e6b 83f3406 950ad22 83f3406 bc563e5 83f3406 8058707 d771e6b 950ad22 83f3406 35d85a5 bc563e5 bb55ca1 b49271d bc563e5 bb55ca1 b49271d bc563e5 35d85a5 5f75ed2 99c5651 35d85a5 8058707 5f75ed2 35d85a5 8058707 35d85a5 8058707 99c5651 35d85a5 8058707 35d85a5 980e49a 35d85a5 8058707 35d85a5 8058707 35d85a5 f218ae3 5f75ed2 35d85a5 99c5651 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 |
import streamlit as st
import sys
import os
import shutil
import time
from datetime import datetime
import csv
import cv2
import numpy as np
from PIL import Image
import torch
from huggingface_hub import HfApi
# Adjust import paths as needed
sys.path.append('Utils')
sys.path.append('model')
from model.CBAM.reunet_cbam import reunet_cbam
from model.transform import transforms
from model.unet import UNET
from Utils.area import pixel_to_sqft, process_and_overlay_image
from split_merge import split, merge
from Utils.convert import read_pansharpened_rgb
# Initialize Hugging Face API
hf_api = HfApi()
# Get the token from secrets; required to push uploads/masks/logs back to the Space repo.
HF_TOKEN = st.secrets.get("HF_TOKEN")
if not HF_TOKEN:
    # Fail fast: nothing downstream works without write access to the repo.
    st.error("HF_TOKEN not found in secrets. Please set it in your Space's Configuration > Secrets.")
    st.stop()
# Your Space ID (this should match exactly with your Hugging Face Space URL)
REPO_ID = "Pavan2k4/Building_area"
REPO_TYPE = "space"
# Define subdirectories (all relative to the Space's working directory)
UPLOAD_DIR = "uploaded_images"        # raw + converted uploads
MASK_DIR = "generated_masks"          # final merged prediction masks
PATCHES_DIR = "patches"               # temporary image tiles for large inputs
PRED_PATCHES_DIR = "pred_patches"     # temporary per-tile prediction masks
CSV_LOG_PATH = "image_log.csv"        # running log of processed images
def split(image, destination=PATCHES_DIR, patch_size=650):
    """Split an image file into ``patch_size`` x ``patch_size`` tiles.

    Tiles on the right/bottom edges may be smaller than ``patch_size``.
    Each tile is written to ``destination`` as ``patch_{y}_{x}.png`` where
    (y, x) is the tile's top-left corner in the source image — `merge`
    relies on this naming scheme to reassemble the image.

    NOTE: this definition intentionally overrides the ``split`` imported
    from ``split_merge`` above.

    Args:
        image: Path of the image file to split.
        destination: Directory that receives the tile files.
        patch_size: Tile edge length in pixels.

    Raises:
        FileNotFoundError: If the image cannot be read.
    """
    img = cv2.imread(image)
    if img is None:
        # cv2.imread returns None (no exception) on failure; surface a clear error.
        raise FileNotFoundError(f"Could not read image: {image}")
    os.makedirs(destination, exist_ok=True)
    h, w, _ = img.shape
    for y in range(0, h, patch_size):
        for x in range(0, w, patch_size):
            patch = img[y:y+patch_size, x:x+patch_size]
            patch_filename = f"patch_{y}_{x}.png"
            patch_path = os.path.join(destination, patch_filename)
            cv2.imwrite(patch_path, patch)
def merge(patch_folder, dest_image='out.png', image_shape=None):
    """Reassemble tiles produced by `split` into one image on disk.

    Tile positions are recovered from filenames ending in ``_{y}_{x}.png``
    (the last bare-digit part is y; the ``<x>.png`` part is x). Edge tiles
    that would overrun the canvas are shifted back inside it, matching the
    smaller edge tiles `split` produces.

    NOTE: this definition intentionally overrides the ``merge`` imported
    from ``split_merge`` above.

    Args:
        patch_folder: Directory containing the tile PNG files.
        dest_image: Output path for the merged image.
        image_shape: Shape tuple of the original image; the canvas is
            ``image_shape[:-1] + (3,)`` (assumes a 3-dim shape — the caller
            passes ``np.array(img).shape``).

    Raises:
        ValueError: If a tile filename does not encode its coordinates.
    """
    merged = np.zeros(image_shape[:-1] + (3,), dtype=np.uint8)
    for filename in os.listdir(patch_folder):
        if filename.endswith(".png"):
            patch_path = os.path.join(patch_folder, filename)
            patch = cv2.imread(patch_path)
            patch_height, patch_width, _ = patch.shape
            # Extract patch coordinates from filename
            parts = filename.split("_")
            x, y = None, None
            for part in parts:
                if part.endswith(".png"):
                    x = int(part.split(".")[0])
                elif part.isdigit():
                    y = int(part)
            if x is None or y is None:
                # BUG FIX: the message previously contained the literal text
                # "(unknown)" instead of interpolating the offending filename.
                raise ValueError(f"Invalid filename: {filename}")
            # Check if patch fits within image boundaries
            if x + patch_width > image_shape[1] or y + patch_height > image_shape[0]:
                # Adjust patch position to fit within image boundaries
                if x + patch_width > image_shape[1]:
                    x = image_shape[1] - patch_width
                if y + patch_height > image_shape[0]:
                    y = image_shape[0] - patch_height
            # Merge patch into the main image
            merged[y:y+patch_height, x:x+patch_width, :] = patch
    cv2.imwrite(dest_image, merged)
# Create directories (idempotent) so uploads/masks/patches always have a home.
for directory in [UPLOAD_DIR, MASK_DIR, PATCHES_DIR, PRED_PATCHES_DIR]:
    os.makedirs(directory, exist_ok=True)
# Load model once per process; st.cache_resource shares it across reruns.
@st.cache_resource
def load_model():
    """Build the ReUNet-CBAM network, restore its weights from
    'latest.pth' (on CPU), and return it in eval mode."""
    net = reunet_cbam()
    checkpoint = torch.load('latest.pth', map_location='cpu')
    net.load_state_dict(checkpoint['model_state_dict'])
    net.eval()
    return net
model = load_model()
def predict(image):
    """Run the global segmentation model on one transformed image tensor.

    Args:
        image: A CHW tensor (batch dimension is added here).

    Returns:
        The model output as a squeezed NumPy array on the CPU.
    """
    batch = image.unsqueeze(0)
    with torch.no_grad():
        output = model(batch)
    return output.squeeze().cpu().numpy()
def save_to_hf_repo(local_path, repo_path):
    """Upload a local file to the Space repo, reporting failures in the UI.

    Best effort: upload errors are shown via Streamlit but never raised,
    so a failed upload does not abort the processing pipeline.

    Args:
        local_path: Path of the file on local disk.
        repo_path: Destination path inside the Hugging Face repo.
    """
    try:
        hf_api.upload_file(
            path_or_fileobj=local_path,
            path_in_repo=repo_path,
            token=HF_TOKEN,
            repo_id=REPO_ID,
            repo_type=REPO_TYPE,
        )
    except Exception as upload_error:
        st.error(f"Error uploading file: {str(upload_error)}")
        st.error("Detailed error information:")
        st.exception(upload_error)
def log_image_details(image_id, image_filename, mask_filename):
    """Append one row for a processed image to the CSV log and push the
    log to the Hugging Face repo.

    The S.No column equals the current row count of the file (header
    included), which yields 1, 2, 3, ... across calls.

    Args:
        image_id: Identifier for the upload (the Unix timestamp).
        image_filename: Name of the (converted) uploaded image file.
        mask_filename: Name of the generated mask file.
    """
    file_exists = os.path.exists(CSV_LOG_PATH)
    current_time = datetime.now()
    date_str = current_time.strftime('%Y-%m-%d')
    # Renamed from `time` to avoid shadowing the imported `time` module.
    time_str = current_time.strftime('%H:%M:%S')
    # Determine the next S.No BEFORE opening the file for append, instead of
    # re-reading the log while an append handle is open on it.
    if file_exists:
        with open(CSV_LOG_PATH, mode='r', newline='') as existing:
            sno = sum(1 for _ in csv.reader(existing))
    else:
        sno = 1
    with open(CSV_LOG_PATH, mode='a', newline='') as file:
        writer = csv.writer(file)
        if not file_exists:
            writer.writerow(['S.No', 'Date', 'Time', 'Image ID', 'Image Filename', 'Mask Filename'])
        writer.writerow([sno, date_str, time_str, image_id, image_filename, mask_filename])
    # Save CSV to Hugging Face repo
    save_to_hf_repo(CSV_LOG_PATH, 'image_log.csv')
def upload_page():
    """Streamlit page: upload a satellite image, segment it, save results.

    Large images (>650 px on either side) are split into patches,
    predicted patch-wise, and the patch masks merged; smaller images are
    predicted in one pass. The upload, the generated mask, and a CSV log
    entry are pushed to the Hugging Face repo.

    Session-state keys:
        file_uploaded -- True once an upload has been fully processed.
        filename      -- name of the (converted) uploaded image file.
        mask_filename -- name of the generated mask file.
    """
    if 'file_uploaded' not in st.session_state:
        st.session_state.file_uploaded = False
    if 'filename' not in st.session_state:
        st.session_state.filename = None
    if 'mask_filename' not in st.session_state:
        st.session_state.mask_filename = None
    image = st.file_uploader('Choose a satellite image', type=['jpg', 'png', 'jpeg', 'tiff', 'tif'])
    if image is not None and not st.session_state.file_uploaded:
        try:
            bytes_data = image.getvalue()
            timestamp = int(time.time())
            original_filename = image.name
            file_extension = os.path.splitext(original_filename)[1].lower()
            # GeoTIFF inputs are stored as .tif and converted to PNG below.
            if file_extension in ['.tiff', '.tif']:
                filename = f"image_{timestamp}.tif"
                converted_filename = f"image_{timestamp}_converted.png"
            else:
                filename = f"image_{timestamp}.png"
                converted_filename = filename
            filepath = os.path.join(UPLOAD_DIR, filename)
            converted_filepath = os.path.join(UPLOAD_DIR, converted_filename)
            with open(filepath, "wb") as f:
                f.write(bytes_data)
            st.success('File uploaded and saved')
            # Save image to Hugging Face repo
            # BUG FIX: the repo path previously contained the literal text
            # "(unknown)" instead of interpolating the saved filename.
            save_to_hf_repo(filepath, f'uploaded_images/{filename}')
            # Check if the uploaded file is a GeoTIFF
            if file_extension in ['.tiff', '.tif']:
                st.info('Processing GeoTIFF image...')
                rgb_image = read_pansharpened_rgb(filepath)
                cv2.imwrite(converted_filepath, cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR))
                img = Image.open(converted_filepath)
            else:
                img = Image.open(filepath)
            st.image(img, caption='Uploaded Image', use_column_width=True)
            # Store the name of the converted image for the result page
            st.session_state.filename = converted_filename
            # Convert image to numpy array
            img_array = np.array(img)
            # Check if image shape is more than 650x650
            if img_array.shape[0] > 650 or img_array.shape[1] > 650:
                # Split image into patches
                split(converted_filepath, patch_size=512)
                # Display buffer while analyzing
                with st.spinner('Analyzing...'):
                    # Predict on each patch
                    for patch_filename in os.listdir(PATCHES_DIR):
                        if patch_filename.endswith(".png"):
                            patch_path = os.path.join(PATCHES_DIR, patch_filename)
                            patch_img = Image.open(patch_path)
                            patch_tr_img = transforms(patch_img)
                            prediction = predict(patch_tr_img)
                            mask = (prediction > 0.5).astype(np.uint8) * 255
                            mask_filename = f"mask_{patch_filename}"
                            mask_filepath = os.path.join(PRED_PATCHES_DIR, mask_filename)
                            Image.fromarray(mask).save(mask_filepath)
                # Merge predicted patches into a full-size mask
                merged_mask_filename = f"mask_{timestamp}.png"
                merged_mask_path = os.path.join(MASK_DIR, merged_mask_filename)
                merge(PRED_PATCHES_DIR, merged_mask_path, img_array.shape)
                # Save merged mask
                st.session_state.mask_filename = merged_mask_filename
                # Clean up temporary patch files
                st.info('Cleaning up temporary files...')
                shutil.rmtree(PATCHES_DIR)
                shutil.rmtree(PRED_PATCHES_DIR)
                os.makedirs(PATCHES_DIR)  # Recreate empty folders
                os.makedirs(PRED_PATCHES_DIR)
            else:
                # Predict on whole image
                st.session_state.tr_img = transforms(img)
                prediction = predict(st.session_state.tr_img)
                mask = (prediction > 0.5).astype(np.uint8) * 255
                mask_filename = f"mask_{timestamp}.png"
                mask_filepath = os.path.join(MASK_DIR, mask_filename)
                Image.fromarray(mask).save(mask_filepath)
                st.session_state.mask_filename = mask_filename
            st.success('Mask generated and saved')
            # Save mask to Hugging Face repo
            mask_filepath = os.path.join(MASK_DIR, st.session_state.mask_filename)
            save_to_hf_repo(mask_filepath, f'generated_masks/{st.session_state.mask_filename}')
            # Log image details
            log_image_details(timestamp, converted_filename, st.session_state.mask_filename)
            st.session_state.file_uploaded = True
        except Exception as e:
            st.error(f"An error occurred: {str(e)}")
            st.error("Please check the logs for more details.")
            print(f"Error in upload_page: {str(e)}")  # This will appear in the Streamlit logs
            # Best-effort cleanup of leftover patch files after a failure.
            for filename in os.listdir(PATCHES_DIR):
                file_path = os.path.join(PATCHES_DIR, filename)
                if os.path.isfile(file_path):
                    os.remove(file_path)
            for filename in os.listdir(PRED_PATCHES_DIR):
                file_path = os.path.join(PRED_PATCHES_DIR, filename)
                if os.path.isfile(file_path):
                    os.remove(file_path)
    if st.session_state.file_uploaded and st.button('View result'):
        if st.session_state.filename is None:
            st.error("Please upload an image before viewing the result.")
        else:
            st.success('Image analyzed')
            st.session_state.page = 'result'
            st.rerun()
def result_page():
    """Streamlit page: show the original image, its predicted mask, and an
    overlay with per-building area annotations.

    Reads ``filename`` and ``mask_filename`` from session state; the
    statement order below is the on-screen render order.
    """
    st.title('Analysis Result')
    if 'filename' not in st.session_state or 'mask_filename' not in st.session_state:
        # Nothing processed yet — offer a way back and stop rendering.
        st.error("No image or mask file found. Please upload and process an image first.")
        if st.button('Back to Upload'):
            st.session_state.page = 'upload'
            st.session_state.file_uploaded = False
            st.rerun()
        return
    col1, col2 = st.columns(2)
    # Display original image
    original_img_path = os.path.join(UPLOAD_DIR, st.session_state.filename)
    if os.path.exists(original_img_path):
        original_img = Image.open(original_img_path)
        col1.image(original_img, caption='Original Image', use_column_width=True)
    else:
        col1.error(f"Original image file not found: {original_img_path}")
    # Display predicted mask
    mask_path = os.path.join(MASK_DIR, st.session_state.mask_filename)
    if os.path.exists(mask_path):
        mask = Image.open(mask_path)
        col2.image(mask, caption='Predicted Mask', use_column_width=True)
    else:
        col2.error(f"Predicted mask file not found: {mask_path}")
    st.subheader("Overlay with Area of Buildings (sqft)")
    # Display overlayed image
    if os.path.exists(original_img_path) and os.path.exists(mask_path):
        original_np = cv2.imread(original_img_path)
        mask_np = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
        # Ensure mask is binary
        _, mask_np = cv2.threshold(mask_np, 127, 255, cv2.THRESH_BINARY)
        # Resize mask to match original image size if necessary
        if original_np.shape[:2] != mask_np.shape[:2]:
            mask_np = cv2.resize(mask_np, (original_np.shape[1], original_np.shape[0]))
        # Process and overlay image
        overlay_img = process_and_overlay_image(original_np, mask_np, 'output.png')
        st.image(overlay_img, caption='Overlay Image', use_column_width=True)
    else:
        st.error("Image or mask file not found for overlay.")
    if st.button('Back to Upload'):
        st.session_state.page = 'upload'
        st.session_state.file_uploaded = False
        st.rerun()
def main():
    """App entry point: render the title and dispatch to the current page."""
    st.title('Building area estimation')
    # Default to the upload page on first run.
    if 'page' not in st.session_state:
        st.session_state.page = 'upload'
    current_page = st.session_state.page
    if current_page == 'upload':
        upload_page()
    elif current_page == 'result':
        result_page()


if __name__ == '__main__':
    main()