Pavan2k4 committed · verified
Commit faf25b4 · 1 Parent(s): 4604d92

Update app.py

Files changed (1)
app.py +18 -87
app.py CHANGED
@@ -1,5 +1,3 @@
-
-
 import streamlit as st
 import sys
 import os
@@ -11,37 +9,26 @@ import cv2
 import numpy as np
 from PIL import Image
 import torch
-from huggingface_hub import HfApi
 
 # Adjust import paths as needed
 sys.path.append('Utils')
 sys.path.append('model')
 from model.CBAM.reunet_cbam import reunet_cbam
 from model.transform import transforms
-
+from model.unet import UNET
 from Utils.area import pixel_to_sqft, process_and_overlay_image
-from split_merge import merge
+from split_merge import split, merge
 from Utils.convert import read_pansharpened_rgb
 
-# Initialize Hugging Face API
-hf_api = HfApi()
-
-# Get the token from secrets
-HF_TOKEN = st.secrets.get("HF_TOKEN")
-if not HF_TOKEN:
-    st.error("HF_TOKEN not found in secrets. Please set it in your Space's Configuration > Secrets.")
-    st.stop()
+# Define base directory for Hugging Face Spaces
+BASE_DIR = "/home/user"
 
-# Your Space ID (this should match exactly with your Hugging Face Space URL)
-REPO_ID = "Pavan2k4/Building_area"
-REPO_TYPE = "space"
-
-# Define subdirectories using relative paths
-UPLOAD_DIR = "uploaded_images"
-MASK_DIR = "generated_masks"
-PATCHES_DIR = "patches"
-PRED_PATCHES_DIR = "pred_patches"
-CSV_LOG_PATH = "image_log.csv"
+# Define subdirectories
+UPLOAD_DIR = os.path.join(BASE_DIR, "uploaded_images")
+MASK_DIR = os.path.join(BASE_DIR, "generated_masks")
+PATCHES_DIR = os.path.join(BASE_DIR, "patches")
+PRED_PATCHES_DIR = os.path.join(BASE_DIR, "pred_patches")
+CSV_LOG_PATH = os.path.join(BASE_DIR, "image_log.csv")
 
 # Create directories
 for directory in [UPLOAD_DIR, MASK_DIR, PATCHES_DIR, PRED_PATCHES_DIR]:
@@ -62,21 +49,6 @@ def predict(image):
         output = model(image.unsqueeze(0))
     return output.squeeze().cpu().numpy()
 
-def save_to_hf_repo(local_path, repo_path):
-    try:
-        hf_api.upload_file(
-            path_or_fileobj=local_path,
-            path_in_repo=repo_path,
-            repo_id=REPO_ID,
-            repo_type=REPO_TYPE,
-            token=HF_TOKEN
-        )
-        st.success(f"File uploaded successfully to {repo_path}")
-    except Exception as e:
-        st.error(f"Error uploading file: {str(e)}")
-        st.error("Detailed error information:")
-        st.exception(e)
-
 def log_image_details(image_id, image_filename, mask_filename):
     file_exists = os.path.exists(CSV_LOG_PATH)
 
@@ -98,22 +70,6 @@ def log_image_details(image_id, image_filename, mask_filename):
            sno = 1
 
        writer.writerow([sno, date, time, image_id, image_filename, mask_filename])
-
-        # Save CSV to Hugging Face repo
-        save_to_hf_repo(CSV_LOG_PATH, 'image_log.csv')
-
-def split(image_path, patch_size=512):
-    img = Image.open(image_path)
-    width, height = img.size
-
-    for i in range(0, height, patch_size):
-        for j in range(0, width, patch_size):
-            box = (j, i, j+patch_size, i+patch_size)
-            patch = img.crop(box)
-            patch_filename = f"patch_{i}_{j}.png"
-            patch_path = os.path.join(PATCHES_DIR, patch_filename)
-            patch.save(patch_path)
-            st.write(f"Saved patch: {patch_path}")  # Debug output
 
 def upload_page():
     if 'file_uploaded' not in st.session_state:
@@ -150,9 +106,6 @@ def upload_page():
 
            st.success(f"Image saved to {filepath}")
 
-            # Save image to Hugging Face repo
-            save_to_hf_repo(filepath, f'uploaded_images/{filename}')
-
            # Check if the uploaded file is a GeoTIFF
            if file_extension in ['.tiff', '.tif']:
                st.info('Processing GeoTIFF image...')
@@ -172,25 +125,17 @@ def upload_page():
            # Convert image to numpy array
            img_array = np.array(img)
 
-            st.write(f"Image shape: {img_array.shape}")  # Debug output
-
            # Check if image shape is more than 650x650
            if img_array.shape[0] > 650 or img_array.shape[1] > 650:
-                st.write("Splitting image into patches...")  # Debug output
                # Split image into patches
                split(converted_filepath, patch_size=512)
 
-                # Count and display the number of patches
-                num_patches = len([f for f in os.listdir(PATCHES_DIR) if f.endswith('.png')])
-                st.write(f"Number of patches created: {num_patches}")  # Debug output
-
                # Display buffer while analyzing
                with st.spinner('Analyzing...'):
                    # Predict on each patch
                    for patch_filename in os.listdir(PATCHES_DIR):
                        if patch_filename.endswith(".png"):
                            patch_path = os.path.join(PATCHES_DIR, patch_filename)
-                            st.write(f"Processing patch: {patch_path}")  # Debug output
                            patch_img = Image.open(patch_path)
                            patch_tr_img = transforms(patch_img)
                            prediction = predict(patch_tr_img)
@@ -198,27 +143,24 @@
                            mask_filename = f"mask_{patch_filename}"
                            mask_filepath = os.path.join(PRED_PATCHES_DIR, mask_filename)
                            Image.fromarray(mask).save(mask_filepath)
-                            st.write(f"Saved mask: {mask_filepath}")  # Debug output
 
                    # Merge predicted patches
                    merged_mask_filename = f"mask_{timestamp}.png"
                    merged_mask_path = os.path.join(MASK_DIR, merged_mask_filename)
                    merge(PRED_PATCHES_DIR, merged_mask_path, img_array.shape)
-                    st.write(f"Merged mask saved: {merged_mask_path}")  # Debug output
 
                    # Save merged mask
                    st.session_state.mask_filename = merged_mask_filename
 
-                    # Clean up temporary patch files but keep the folders
+                    # Clean up temporary patch files
                    st.info('Cleaning up temporary files...')
-                    for file in os.listdir(PATCHES_DIR):
-                        os.remove(os.path.join(PATCHES_DIR, file))
-                    for file in os.listdir(PRED_PATCHES_DIR):
-                        os.remove(os.path.join(PRED_PATCHES_DIR, file))
+                    shutil.rmtree(PATCHES_DIR)
+                    shutil.rmtree(PRED_PATCHES_DIR)
+                    os.makedirs(PATCHES_DIR)  # Recreate empty folders
+                    os.makedirs(PRED_PATCHES_DIR)
                    st.success('Temporary files cleaned up')
            else:
                # Predict on whole image
-                st.write("Processing whole image without splitting")  # Debug output
                st.session_state.tr_img = transforms(img)
                prediction = predict(st.session_state.tr_img)
                mask = (prediction > 0.5).astype(np.uint8) * 255
@@ -226,21 +168,13 @@
                mask_filepath = os.path.join(MASK_DIR, mask_filename)
                Image.fromarray(mask).save(mask_filepath)
                st.session_state.mask_filename = mask_filename
-                st.write(f"Mask saved: {mask_filepath}")  # Debug output
-
-            # Save mask to Hugging Face repo
-            mask_filepath = os.path.join(MASK_DIR, st.session_state.mask_filename)
-            save_to_hf_repo(mask_filepath, f'generated_masks/{st.session_state.mask_filename}')
-
-            # Log image details
-            log_image_details(timestamp, converted_filename, st.session_state.mask_filename)
 
            st.session_state.file_uploaded = True
 
        except Exception as e:
            st.error(f"An error occurred: {str(e)}")
            st.error("Please check the logs for more details.")
-            st.exception(e)  # This will display the full traceback in the Streamlit app
+            print(f"Error in upload_page: {str(e)}")  # This will appear in the Streamlit logs
 
    if st.session_state.file_uploaded and st.button('View result'):
        if st.session_state.filename is None:
@@ -303,11 +237,8 @@ def result_page():
        st.error("Image or mask file not found for overlay.")
 
    if st.button('Back to Upload'):
-        # Remove files from PATCHES_DIR and PRED_PATCHES_DIR without deleting the folders
-        for file in os.listdir(PATCHES_DIR):
-            os.remove(os.path.join(PATCHES_DIR, file))
-        for file in os.listdir(PRED_PATCHES_DIR):
-            os.remove(os.path.join(PRED_PATCHES_DIR, file))
+        shutil.rmtree(PATCHES_DIR)
+        shutil.rmtree(PRED_PATCHES_DIR)
        st.session_state.page = 'upload'
        st.session_state.file_uploaded = False
        st.session_state.filename = None
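Note on the split/merge change: the commit drops the inline split() helper from app.py and instead imports split and merge from split_merge. The sketch below is a minimal reconstruction of what that pair could look like, based only on the call sites visible in this diff (split(image_path, patch_size=512) and merge(PRED_PATCHES_DIR, merged_mask_path, img_array.shape)) and on the removed inline split(); the merge body and the placeholder PATCHES_DIR constant are assumptions, not the actual contents of split_merge.py.

import os
import numpy as np
from PIL import Image

PATCHES_DIR = "patches"  # placeholder; the app points this at BASE_DIR/patches

def split(image_path, patch_size=512):
    # Tile the image into patch_size x patch_size crops named patch_{row}_{col}.png,
    # mirroring the inline helper removed in this commit.
    img = Image.open(image_path)
    width, height = img.size
    for i in range(0, height, patch_size):
        for j in range(0, width, patch_size):
            # Boxes may overhang the right/bottom edge; PIL's crop fills the missing area.
            patch = img.crop((j, i, j + patch_size, i + patch_size))
            patch.save(os.path.join(PATCHES_DIR, f"patch_{i}_{j}.png"))

def merge(pred_patches_dir, out_path, image_shape, patch_size=512):
    # Reassemble mask_patch_{row}_{col}.png tiles into one mask of the original image shape.
    height, width = image_shape[:2]
    merged = np.zeros((height, width), dtype=np.uint8)
    for filename in os.listdir(pred_patches_dir):
        if not filename.endswith(".png"):
            continue
        # Row/column offsets are encoded in the filename: mask_patch_{i}_{j}.png
        _, _, i, j = filename[:-len(".png")].split("_")
        i, j = int(i), int(j)
        tile = np.array(Image.open(os.path.join(pred_patches_dir, filename)).convert("L"))
        h = min(patch_size, height - i)
        w = min(patch_size, width - j)
        merged[i:i + h, j:j + w] = tile[:h, :w]
    Image.fromarray(merged).save(out_path)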