weidai00 commited on
Commit
6c0075d
·
verified ·
1 Parent(s): 80f59cd

Upload 72 files

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +6 -0
  2. AV/Tools/AVclassifiation.py +202 -0
  3. AV/Tools/AVclassifiationMetrics.py +465 -0
  4. AV/Tools/BGR2RGB.py +25 -0
  5. AV/Tools/BinaryPostProcessing.py +108 -0
  6. AV/Tools/FakePad.py +115 -0
  7. AV/Tools/Float2Uint.py +18 -0
  8. AV/Tools/Hemelings_eval.py +119 -0
  9. AV/Tools/Im2Double.py +15 -0
  10. AV/Tools/ImageResize.py +214 -0
  11. AV/Tools/Remove_small_holes.py +88 -0
  12. AV/Tools/Standardize.py +45 -0
  13. AV/Tools/__init__.py +0 -0
  14. AV/Tools/__pycache__/AVclassifiation.cpython-39.pyc +0 -0
  15. AV/Tools/__pycache__/AVclassifiationMetrics.cpython-310.pyc +0 -0
  16. AV/Tools/__pycache__/AVclassifiationMetrics.cpython-39.pyc +0 -0
  17. AV/Tools/__pycache__/AVclassifiationMetrics_v1.cpython-39.pyc +0 -0
  18. AV/Tools/__pycache__/BGR2RGB.cpython-39.pyc +0 -0
  19. AV/Tools/__pycache__/BinaryPostProcessing.cpython-310.pyc +0 -0
  20. AV/Tools/__pycache__/BinaryPostProcessing.cpython-39.pyc +0 -0
  21. AV/Tools/__pycache__/ImageResize.cpython-310.pyc +0 -0
  22. AV/Tools/__pycache__/ImageResize.cpython-39.pyc +0 -0
  23. AV/Tools/__pycache__/Remove_small_holes.cpython-310.pyc +0 -0
  24. AV/Tools/__pycache__/Remove_small_holes.cpython-39.pyc +0 -0
  25. AV/Tools/__pycache__/__init__.cpython-310.pyc +0 -0
  26. AV/Tools/__pycache__/__init__.cpython-39.pyc +0 -0
  27. AV/Tools/__pycache__/data_augmentation.cpython-310.pyc +0 -0
  28. AV/Tools/__pycache__/data_augmentation.cpython-39.pyc +0 -0
  29. AV/Tools/__pycache__/evalution_vessel.cpython-310.pyc +0 -0
  30. AV/Tools/__pycache__/global2patch_AND_patch2global.cpython-39.pyc +0 -0
  31. AV/Tools/__pycache__/utils.cpython-310.pyc +0 -0
  32. AV/Tools/__pycache__/utils.cpython-39.pyc +0 -0
  33. AV/Tools/__pycache__/utils_test.cpython-39.pyc +0 -0
  34. AV/Tools/__pycache__/warmup.cpython-39.pyc +0 -0
  35. AV/Tools/centerline_evaluation.py +160 -0
  36. AV/Tools/data_augmentation.py +94 -0
  37. AV/Tools/evalution_vessel.py +75 -0
  38. AV/Tools/global2patch_AND_patch2global.py +106 -0
  39. AV/Tools/utils_test.py +353 -0
  40. AV/Tools/warmup.py +69 -0
  41. AV/config/__pycache__/config_test_general.cpython-310.pyc +0 -0
  42. AV/config/__pycache__/config_test_general.cpython-39.pyc +0 -0
  43. AV/config/__pycache__/config_train_general.cpython-310.pyc +0 -0
  44. AV/config/__pycache__/config_train_general.cpython-39.pyc +0 -0
  45. AV/config/config_test_general.py +134 -0
  46. AV/config/config_train_general.py +121 -0
  47. AV/log/ALL-2024_09_06_09_17_18(9000)/G_9000.pkl +3 -0
  48. AV/log/DRIVE-2023_10_20_08_36_50(6500)/G_6500.pkl +3 -0
  49. AV/log/HRF-2023_10_19_11_07_31(1500)/G_1500.pkl +3 -0
  50. AV/log/LES-2023_09_28_14_04_06(0)/G_0.pkl +3 -0
.gitattributes CHANGED
@@ -33,3 +33,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ examples/all.jpg filter=lfs diff=lfs merge=lfs -text
37
+ examples/DRIVE.tif filter=lfs diff=lfs merge=lfs -text
38
+ examples/hrf.png filter=lfs diff=lfs merge=lfs -text
39
+ examples/LES.png filter=lfs diff=lfs merge=lfs -text
40
+ examples/tmp_upload.png filter=lfs diff=lfs merge=lfs -text
41
+ examples/ukbb.png filter=lfs diff=lfs merge=lfs -text
AV/Tools/AVclassifiation.py ADDED
@@ -0,0 +1,202 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import numpy as np
3
+ import os
4
+ # import natsort
5
+ import pandas as pd
6
+ from skimage.morphology import skeletonize, erosion, square,dilation
7
+ from AV.Tools.BinaryPostProcessing import binaryPostProcessing3
8
+ from PIL import Image
9
+ from scipy.signal import convolve2d
10
+ from collections import OrderedDict
11
+ import time
12
+ #########################################
13
+
14
+
15
+
16
+
17
def Skeleton(a_or_v, a_and_v):
    """Thin a binary vessel mask to a skeleton and compute its distance map.

    :param a_or_v: uint8 binary mask (0/255) fed to the distance transform
    :param a_and_v: binary mask (0/255) to be thinned to a 1-pixel skeleton
    :return: (thinned, dist) — the skeleton scaled back to 0/255, and the
        L2 distance transform of ``a_or_v`` (distance to nearest background,
        i.e. an estimate of the local vessel radius)
    """
    mask = np.uint8(a_and_v).copy()
    # Distance to background approximates the local vessel half-width.
    dist = cv2.distanceTransform(a_or_v, cv2.DIST_L2, 3)
    thinned = np.uint8(skeletonize(mask / 255)) * 255
    return thinned, dist
24
+
25
+
26
def cal_crosspoint(vessel):
    """Erase bifurcation/crossing points from a binary vessel map.

    The skeleton is convolved with a kernel whose response at a skeleton
    pixel is 10 + (number of 8-connected skeleton neighbours); a response
    >= 13 means three or more branches meet there, so the crossing is
    blanked with a filled circle of twice the local vessel radius.

    :param vessel: uint8 binary vessel image (0/255); modified in place
    :return: the same ``vessel`` array with crossing regions erased
    """
    thinned1, dist = Skeleton(vessel, vessel)
    thh = thinned1 / 255
    # Centre weight 10 separates "is a skeleton pixel" from the neighbour
    # count encoded in the surrounding ones.
    kernel1 = np.array([[1, 1, 1], [1, 10, 1], [1, 1, 1]])

    th = convolve2d(thh, kernel1, mode="same")
    # Vectorized replacement of the original per-pixel double loop;
    # np.argwhere scans in the same row-major order, so the circles are
    # drawn in the identical sequence.
    for u, j in np.argwhere(th >= 13.0):
        cv2.circle(vessel, (int(j), int(u)), 2 * int(dist[u, j]), (0, 0, 0), -1)
    return vessel
41
+
42
+
43
def AVclassifiation(out_path, PredAll1, PredAll2, VesselPredAll, DataSet=0, image_basename=''):
    """
    Fuse artery/vein/vessel probability maps into an RGB artery-vein mask.

    PredAll1: prediction probabilities of artery, shape (N, 1, H, W)
    PredAll2: prediction probabilities of vein, shape (N, 1, H, W)
    VesselPredAll: prediction probabilities of vessel, shape (N, 1, H, W)
    DataSet: number of images to process
    image_basename: names used when saving masks (saving is commented out)

    Returns the RGB mask (red = artery, blue = vein) of the LAST processed
    image.  NOTE(review): intermediate images are not saved or returned;
    re-enable the commented-out saving code if all N masks are needed.
    """

    ImgN = DataSet

    for ImgNumber in range(ImgN):

        height, width = PredAll1.shape[2:4]

        VesselProb = VesselPredAll[ImgNumber, 0, :, :]

        ArteryProb = PredAll1[ImgNumber, 0, :, :]
        VeinProb = PredAll2[ImgNumber, 0, :, :]

        # Vessel pixel: weak vessel confidence AND at least one confident
        # artery/vein response.
        VesselSeg = (VesselProb >= 0.1) & ((ArteryProb >0.2) | (VeinProb > 0.2))
        # VesselSeg = (VesselProb >= 0.5) & ((ArteryProb >= 0.5) | (VeinProb >= 0.5))
        crossSeg = (VesselProb >= 0.1) & ((ArteryProb >= 0.6) & (VeinProb >= 0.6))  # currently unused
        VesselSeg = binaryPostProcessing3(VesselSeg, removeArea=100, fillArea=20)

        vesselPixels = np.where(VesselSeg > 0)

        # Keep artery/vein probabilities only at confirmed vessel pixels.
        ArteryProb2 = np.zeros((height, width))
        VeinProb2 = np.zeros((height, width))
        crossProb2 = np.zeros((height, width))  # currently unused
        image_color = np.zeros((3, height, width), dtype=np.uint8)
        for i in range(len(vesselPixels[0])):
            row = vesselPixels[0][i]
            col = vesselPixels[1][i]
            probA = ArteryProb[row, col]
            probV = VeinProb[row, col]
            #probA,probV = softmax([probA,probV])
            ArteryProb2[row, col] = probA
            VeinProb2[row, col] = probV

        # Initial hard assignment: whichever class probability dominates.
        test_use_vessel = np.zeros((height, width), np.uint8)
        ArteryPred2 = ((ArteryProb2 >= 0.2) & (ArteryProb2 >= VeinProb2))
        VeinPred2 = ((VeinProb2 >= 0.2) & (VeinProb2 >= ArteryProb2))

        ArteryPred2 = binaryPostProcessing3(ArteryPred2, removeArea=100, fillArea=20)
        VeinPred2 = binaryPostProcessing3(VeinPred2, removeArea=100, fillArea=20)

        # Channel 0 (red) = artery, channel 2 (blue) = vein.
        image_color[0, :, :] = ArteryPred2 * 255
        image_color[2, :, :] = VeinPred2 * 255
        image_color = image_color.transpose((1, 2, 0))

        #Image.fromarray(image_color).save(os.path.join(out_path, f'{image_basename[ImgNumber].split(".")[0]}_ori.png'))

        imgBin_vessel = ArteryPred2 + VeinPred2
        imgBin_vessel[imgBin_vessel[:, :] == 2] = 1
        test_use_vessel = imgBin_vessel.copy() * 255

        # Remove crossings so each connected component is a single branch.
        vessel = cal_crosspoint(test_use_vessel)

        contours_vessel, hierarchy_c = cv2.findContours(vessel, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

        # inter continuity: relabel each branch as a whole, by whichever
        # class has the higher mean probability inside it.
        for vessel_seg in range(len(contours_vessel)):
            C_vessel = np.zeros(vessel.shape, np.uint8)
            C_vessel = cv2.drawContours(C_vessel, contours_vessel, vessel_seg, (255, 255, 255), cv2.FILLED)
            cli = np.mean(VeinProb2[C_vessel == 255]) / np.mean(ArteryProb2[C_vessel == 255])
            if cli < 1:
                image_color[
                    (C_vessel[:, :] == 255) & (test_use_vessel[:, :] == 255)] = [255, 0, 0]
            else:
                image_color[
                    (C_vessel[:, :] == 255) & (test_use_vessel[:, :] == 255)] = [0, 0, 255]
        # Two refinement passes: flip small isolated segments of one class
        # that are only adjacent to the other class.
        loop=0
        while loop<2:
            # out vein continuity
            vein = image_color[:, :, 2]
            contours_vein, hierarchy_b = cv2.findContours(vein, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

            vein_size = []
            for z in range(len(contours_vein)):
                vein_size.append(contours_vein[z].size)
            vein_size = np.sort(np.array(vein_size))
            # image_color_copy = np.uint8(image_color).copy()
            for vein_seg in range(len(contours_vein)):
                # Only segments smaller than the mean size (capped at 500)
                # are candidates for relabeling.
                judge_number = min(np.mean(vein_size),500)
                # cv2.putText(image_color_copy, str(vein_seg), (int(contours_vein[vein_seg][0][0][0]), int(contours_vein[vein_seg][0][0][1])), 3, 1,
                #             color=(255, 0, 0), thickness=2)
                if contours_vein[vein_seg].size < judge_number:
                    C_vein = np.zeros(vessel.shape, np.uint8)
                    C_vein = cv2.drawContours(C_vein, contours_vein, vein_seg, (255, 255, 255), cv2.FILLED)
                    max_diameter = np.max(Skeleton(C_vein, C_vein)[1])

                    image_color_copy_vein = image_color[:, :, 2].copy()
                    image_color_copy_arter = image_color[:, :, 0].copy()
                    # Remove this segment from its own class map so the
                    # dilation test below only sees *other* segments.
                    image_color_copy_vein = cv2.drawContours(image_color_copy_vein, contours_vein, vein_seg,
                                                             (0, 0, 0),
                                                             cv2.FILLED)
                    # Search neighbourhood of 4x the max vessel diameter.
                    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (
                        4 * int(np.ceil(max_diameter)), 4 * int(np.ceil(max_diameter))))
                    C_vein_dilate = cv2.dilate(C_vein, kernel, iterations=1)
                    # cv2.imwrite(path_out_3, C_vein_dilate)
                    C_vein_dilate_judge = np.zeros(vessel.shape, np.uint8)
                    C_vein_dilate_judge[
                        (C_vein_dilate[:, :] == 255) & (image_color_copy_vein == 255)] = 1
                    C_arter_dilate_judge = np.zeros(vessel.shape, np.uint8)
                    C_arter_dilate_judge[
                        (C_vein_dilate[:, :] == 255) & (image_color_copy_arter == 255)] = 1
                    # Flip to artery if: no nearby vein, some nearby artery,
                    # and the segment's own vein confidence is weak.
                    if (len(np.unique(C_vein_dilate_judge)) == 1) & (
                            len(np.unique(C_arter_dilate_judge)) != 1) & (np.mean(VeinProb2[C_vein == 255]) < 0.6):
                        image_color[
                            (C_vein[:, :] == 255) & (image_color[:, :, 2] == 255)] = [255, 0,
                                                                                      0]

            # out artery continuity (mirror of the vein pass above)
            arter = image_color[:, :, 0]
            contours_arter, hierarchy_a = cv2.findContours(arter, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
            arter_size = []
            for z in range(len(contours_arter)):
                arter_size.append(contours_arter[z].size)
            arter_size = np.sort(np.array(arter_size))
            for arter_seg in range(len(contours_arter)):
                judge_number = min(np.mean(arter_size),500)

                if contours_arter[arter_seg].size < judge_number:

                    C_arter = np.zeros(vessel.shape, np.uint8)
                    C_arter = cv2.drawContours(C_arter, contours_arter, arter_seg, (255, 255, 255), cv2.FILLED)
                    max_diameter = np.max(Skeleton(C_arter, test_use_vessel)[1])

                    image_color_copy_vein = image_color[:, :, 2].copy()
                    image_color_copy_arter = image_color[:, :, 0].copy()
                    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (
                        4 * int(np.ceil(max_diameter)), 4 * int(np.ceil(max_diameter))))
                    image_color_copy_arter = cv2.drawContours(image_color_copy_arter, contours_arter, arter_seg,
                                                              (0, 0, 0),
                                                              cv2.FILLED)
                    C_arter_dilate = cv2.dilate(C_arter, kernel, iterations=1)
                    # image_color[(C_cross[:, :] == 255) & (image_color[:, :, 1] == 255)] = [255, 0, 0]
                    C_arter_dilate_judge = np.zeros(arter.shape, np.uint8)
                    C_arter_dilate_judge[
                        (C_arter_dilate[:, :] == 255) & (image_color_copy_arter[:, :] == 255)] = 1
                    C_vein_dilate_judge = np.zeros(arter.shape, np.uint8)
                    C_vein_dilate_judge[
                        (C_arter_dilate[:, :] == 255) & (image_color_copy_vein[:, :] == 255)] = 1

                    if (len(np.unique(C_arter_dilate_judge)) == 1) & (
                            len(np.unique(C_vein_dilate_judge)) != 1) & (np.mean(ArteryProb2[C_arter == 255]) < 0.6):
                        image_color[
                            (C_arter[:, :] == 255) & (image_color[:, :, 0] == 255)] = [0,
                                                                                       0,
                                                                                       255]
            loop=loop+1

    # image_basename = os.path.basename(image_basename)
    # Image.fromarray(image_color).save(os.path.join(out_path, f'{image_basename.split(".")[0]}.png'))
    # Image.fromarray(np.uint8(VesselProb*255)).save(os.path.join(out_path, f'{image_basename.split(".")[0]}_vessel.png'))
    return image_color
202
+
AV/Tools/AVclassifiationMetrics.py ADDED
@@ -0,0 +1,465 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import numpy as np
3
+ import matplotlib.pyplot as plt
4
+ import os
5
+ # import natsort
6
+ import pandas as pd
7
+ from skimage import morphology
8
+ from sklearn import metrics
9
+ from Tools.BinaryPostProcessing import binaryPostProcessing3
10
+ from PIL import Image
11
+ from scipy.signal import convolve2d
12
+ import time
13
+
14
+ #########################################
15
def softmax(x):
    """Numerically stable softmax over all elements of *x*.

    Shifting by the maximum before exponentiating prevents overflow
    without changing the result.
    """
    shifted = np.exp(x - np.max(x))
    total = shifted.sum()
    return shifted / total
18
+
19
+
20
def Skeleton(a_or_v, a_and_v):
    """Thin a binary vessel mask to a skeleton and compute its distance map.

    :param a_or_v: uint8 binary mask (0/255) fed to the distance transform
    :param a_and_v: binary mask (0/255) to be thinned to a 1-pixel skeleton
    :return: (thinned, dist) — the skeleton scaled back to 0/255, and the
        L2 distance transform of ``a_or_v`` (distance to nearest background,
        i.e. an estimate of the local vessel radius)
    """
    mask = np.uint8(a_and_v).copy()
    # Distance to background approximates the local vessel half-width.
    dist = cv2.distanceTransform(a_or_v, cv2.DIST_L2, 3)
    thinned = np.uint8(morphology.skeletonize(mask / 255)) * 255
    return thinned, dist
27
+
28
+
29
def cal_crosspoint(vessel):
    """Erase bifurcation/crossing points from a binary vessel map.

    A skeleton pixel whose 3x3 convolution response reaches 13 has at
    least three skeleton neighbours, i.e. it is a branch/crossing point;
    such points are blanked with a filled circle of three times the local
    vessel radius.

    :param vessel: uint8 binary vessel image (0/255); modified in place
    :return: the same ``vessel`` array with crossing regions erased
    """
    skeleton_img, dist = Skeleton(vessel, vessel)
    normalized = skeleton_img / 255
    # Centre weight 10 separates "is skeleton" from the neighbour count.
    neighbour_kernel = np.array([[1, 1, 1], [1, 10, 1], [1, 1, 1]])
    response = convolve2d(normalized, neighbour_kernel, mode="same")
    rows, cols = response.shape
    for u in range(rows):
        for j in range(cols):
            if response[u, j] < 13.0:
                continue
            cv2.circle(vessel, (j, u), 3 * int(dist[u, j]), (0, 0, 0), -1)
    return vessel
44
+
45
+
46
+
47
def AVclassifiation_pos_ve(out_path, PredAll1, PredAll2, VesselPredAll, DataSet=0, image_basename=''):
    """
    Build and save an RGB artery-vein mask per image, with branch-level
    (contour-wise) relabeling only — no small-segment refinement passes.

    PredAll1: prediction probabilities of artery, shape (N, 1, H, W)
    PredAll2: prediction probabilities of vein, shape (N, 1, H, W)
    VesselPredAll: prediction probabilities of vessel, shape (N, 1, H, W)
    DataSet: number of images to process
    image_basename: per-image names used for the saved PNG masks
    """

    ImgN = DataSet

    for ImgNumber in range(ImgN):

        height, width = PredAll1.shape[2:4]

        VesselProb = VesselPredAll[ImgNumber, 0, :, :]

        ArteryProb = PredAll1[ImgNumber, 0, :, :]
        VeinProb = PredAll2[ImgNumber, 0, :, :]

        # Vessel pixel: weak vessel confidence AND at least one confident
        # artery/vein response.
        VesselSeg = (VesselProb >= 0.1) & ((ArteryProb >= 0.2) | (VeinProb >= 0.2))
        # VesselSeg = (VesselProb >= 0.5) & ((ArteryProb >= 0.5) | (VeinProb >= 0.5))
        crossSeg = (VesselProb >= 0.1) & ((ArteryProb >= 0.6) & (VeinProb >= 0.6))  # currently unused
        VesselSeg = binaryPostProcessing3(VesselSeg, removeArea=100, fillArea=20)

        vesselPixels = np.where(VesselSeg > 0)

        # Keep artery/vein probabilities only at confirmed vessel pixels.
        ArteryProb2 = np.zeros((height, width))
        VeinProb2 = np.zeros((height, width))
        crossProb2 = np.zeros((height, width))  # currently unused
        image_color = np.zeros((3, height, width), dtype=np.uint8)
        for i in range(len(vesselPixels[0])):
            row = vesselPixels[0][i]
            col = vesselPixels[1][i]
            probA = ArteryProb[row, col]
            probV = VeinProb[row, col]
            ArteryProb2[row, col] = probA
            VeinProb2[row, col] = probV

        # Initial hard assignment: whichever class probability dominates
        # (strict comparison, so ties stay unassigned).
        test_use_vessel = np.zeros((height, width), np.uint8)
        ArteryPred2 = ((ArteryProb2 >= 0.2) & (ArteryProb2 > VeinProb2))
        VeinPred2 = ((VeinProb2 >= 0.2) & (VeinProb2 > ArteryProb2))

        ArteryPred2 = binaryPostProcessing3(ArteryPred2, removeArea=100, fillArea=20)
        VeinPred2 = binaryPostProcessing3(VeinPred2, removeArea=100, fillArea=20)

        # Channel 0 (red) = artery, channel 2 (blue) = vein.
        image_color[0, :, :] = ArteryPred2 * 255
        image_color[2, :, :] = VeinPred2 * 255
        image_color = image_color.transpose((1, 2, 0))

        imgBin_vessel = ArteryPred2 + VeinPred2
        imgBin_vessel[imgBin_vessel[:, :] == 2] = 1
        test_use_vessel = imgBin_vessel.copy() * 255

        # Remove crossings so each connected component is a single branch.
        vessel = cal_crosspoint(test_use_vessel)

        contours_vessel, hierarchy_c = cv2.findContours(vessel, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

        # inter continuity: relabel each branch as a whole, by whichever
        # class has the higher mean probability inside it.
        for vessel_seg in range(len(contours_vessel)):
            C_vessel = np.zeros(vessel.shape, np.uint8)
            C_vessel = cv2.drawContours(C_vessel, contours_vessel, vessel_seg, (255, 255, 255), cv2.FILLED)
            cli = np.mean(VeinProb2[C_vessel == 255]) / np.mean(ArteryProb2[C_vessel == 255])
            if cli < 1:
                image_color[
                    (C_vessel[:, :] == 255) & (test_use_vessel[:, :] == 255)] = [255, 0, 0]
            else:
                image_color[
                    (C_vessel[:, :] == 255) & (test_use_vessel[:, :] == 255)] = [0, 0, 255]


        Image.fromarray(image_color).save(os.path.join(out_path, f'{image_basename[ImgNumber].split(".")[0]}.png'))
119
+
120
+
121
def AVclassifiation(out_path, PredAll1, PredAll2, VesselPredAll, DataSet=0, image_basename=''):
    """
    Build and save an RGB artery-vein mask per image: branch-level
    relabeling followed by one small-segment continuity refinement for
    each class.

    PredAll1: prediction probabilities of artery, shape (N, 1, H, W)
    PredAll2: prediction probabilities of vein, shape (N, 1, H, W)
    VesselPredAll: prediction probabilities of vessel, shape (N, 1, H, W)
    DataSet: number of images to process
    image_basename: per-image names used for the saved PNG masks
    """

    ImgN = DataSet

    for ImgNumber in range(ImgN):

        height, width = PredAll1.shape[2:4]

        VesselProb = VesselPredAll[ImgNumber, 0, :, :]

        ArteryProb = PredAll1[ImgNumber, 0, :, :]
        VeinProb = PredAll2[ImgNumber, 0, :, :]

        # Vessel pixel: weak vessel confidence AND at least one confident
        # artery/vein response.
        VesselSeg = (VesselProb >= 0.1) & ((ArteryProb >= 0.2) | (VeinProb >= 0.2))
        # VesselSeg = (VesselProb >= 0.5) & ((ArteryProb >= 0.5) | (VeinProb >= 0.5))
        crossSeg = (VesselProb >= 0.1) & ((ArteryProb >= 0.6) & (VeinProb >= 0.6))  # currently unused
        VesselSeg = binaryPostProcessing3(VesselSeg, removeArea=100, fillArea=20)

        vesselPixels = np.where(VesselSeg > 0)

        # Keep artery/vein probabilities only at confirmed vessel pixels.
        ArteryProb2 = np.zeros((height, width))
        VeinProb2 = np.zeros((height, width))
        crossProb2 = np.zeros((height, width))  # currently unused
        image_color = np.zeros((3, height, width), dtype=np.uint8)
        for i in range(len(vesselPixels[0])):
            row = vesselPixels[0][i]
            col = vesselPixels[1][i]
            probA = ArteryProb[row, col]
            probV = VeinProb[row, col]
            ArteryProb2[row, col] = probA
            VeinProb2[row, col] = probV

        # Initial hard assignment: whichever class probability dominates.
        test_use_vessel = np.zeros((height, width), np.uint8)
        ArteryPred2 = ((ArteryProb2 >= 0.2) & (ArteryProb2 > VeinProb2))
        VeinPred2 = ((VeinProb2 >= 0.2) & (VeinProb2 > ArteryProb2))

        ArteryPred2 = binaryPostProcessing3(ArteryPred2, removeArea=100, fillArea=20)
        VeinPred2 = binaryPostProcessing3(VeinPred2, removeArea=100, fillArea=20)

        # Channel 0 (red) = artery, channel 2 (blue) = vein.
        image_color[0, :, :] = ArteryPred2 * 255
        image_color[2, :, :] = VeinPred2 * 255
        image_color = image_color.transpose((1, 2, 0))

        imgBin_vessel = ArteryPred2 + VeinPred2
        imgBin_vessel[imgBin_vessel[:, :] == 2] = 1
        test_use_vessel = imgBin_vessel.copy() * 255

        # Remove crossings so each connected component is a single branch.
        vessel = cal_crosspoint(test_use_vessel)

        contours_vessel, hierarchy_c = cv2.findContours(vessel, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

        # inter continuity: relabel each branch as a whole, by whichever
        # class has the higher mean probability inside it.
        for vessel_seg in range(len(contours_vessel)):
            C_vessel = np.zeros(vessel.shape, np.uint8)
            C_vessel = cv2.drawContours(C_vessel, contours_vessel, vessel_seg, (255, 255, 255), cv2.FILLED)
            cli = np.mean(VeinProb2[C_vessel == 255]) / np.mean(ArteryProb2[C_vessel == 255])
            if cli < 1:
                image_color[
                    (C_vessel[:, :] == 255) & (test_use_vessel[:, :] == 255)] = [255, 0, 0]
            else:
                image_color[
                    (C_vessel[:, :] == 255) & (test_use_vessel[:, :] == 255)] = [0, 0, 255]

        # out vein continuity: flip small isolated vein segments that are
        # only adjacent to artery and have weak vein confidence.
        vein = image_color[:, :, 2]
        contours_vein, hierarchy_b = cv2.findContours(vein, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

        vein_size = []
        for z in range(len(contours_vein)):
            vein_size.append(contours_vein[z].size)
        vein_size = np.sort(np.array(vein_size))
        for vein_seg in range(len(contours_vein)):
            # Only segments smaller than the mean size (capped at 500) are
            # candidates for relabeling.
            judge_number = min(np.mean(vein_size),500)
            if contours_vein[vein_seg].size < judge_number:
                C_vein = np.zeros(vessel.shape, np.uint8)
                C_vein = cv2.drawContours(C_vein, contours_vein, vein_seg, (255, 255, 255), cv2.FILLED)
                max_diameter = np.max(Skeleton(C_vein, C_vein)[1])

                image_color_copy_vein = image_color[:, :, 2].copy()
                image_color_copy_arter = image_color[:, :, 0].copy()
                # Remove this segment from its own class map so the
                # dilation test below only sees *other* segments.
                image_color_copy_vein = cv2.drawContours(image_color_copy_vein, contours_vein, vein_seg,
                                                         (0, 0, 0),
                                                         cv2.FILLED)
                # Search neighbourhood of 4x the max vessel diameter.
                kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (
                    4 * int(np.ceil(max_diameter)), 4 * int(np.ceil(max_diameter))))
                C_vein_dilate = cv2.dilate(C_vein, kernel, iterations=1)
                C_vein_dilate_judge = np.zeros(vessel.shape, np.uint8)
                C_vein_dilate_judge[
                    (C_vein_dilate[:, :] == 255) & (image_color_copy_vein == 255)] = 1
                C_arter_dilate_judge = np.zeros(vessel.shape, np.uint8)
                C_arter_dilate_judge[
                    (C_vein_dilate[:, :] == 255) & (image_color_copy_arter == 255)] = 1
                # Flip to artery if: no nearby vein, some nearby artery,
                # and the segment's own vein confidence is weak.
                if (len(np.unique(C_vein_dilate_judge)) == 1) & (
                        len(np.unique(C_arter_dilate_judge)) != 1) & (np.mean(VeinProb2[C_vein == 255]) < 0.5):
                    image_color[
                        (C_vein[:, :] == 255) & (image_color[:, :, 2] == 255)] = [255, 0,
                                                                                  0]

        # out artery continuity (mirror of the vein pass above)
        arter = image_color[:, :, 0]
        contours_arter, hierarchy_a = cv2.findContours(arter, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
        arter_size = []
        for z in range(len(contours_arter)):
            arter_size.append(contours_arter[z].size)
        arter_size = np.sort(np.array(arter_size))
        for arter_seg in range(len(contours_arter)):
            judge_number = min(np.mean(arter_size),500)

            if contours_arter[arter_seg].size < judge_number:

                C_arter = np.zeros(vessel.shape, np.uint8)
                C_arter = cv2.drawContours(C_arter, contours_arter, arter_seg, (255, 255, 255), cv2.FILLED)
                max_diameter = np.max(Skeleton(C_arter, test_use_vessel)[1])

                image_color_copy_vein = image_color[:, :, 2].copy()
                image_color_copy_arter = image_color[:, :, 0].copy()
                kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (
                    4 * int(np.ceil(max_diameter)), 4 * int(np.ceil(max_diameter))))
                image_color_copy_arter = cv2.drawContours(image_color_copy_arter, contours_arter, arter_seg,
                                                          (0, 0, 0),
                                                          cv2.FILLED)
                C_arter_dilate = cv2.dilate(C_arter, kernel, iterations=1)
                C_arter_dilate_judge = np.zeros(arter.shape, np.uint8)
                C_arter_dilate_judge[
                    (C_arter_dilate[:, :] == 255) & (image_color_copy_arter[:, :] == 255)] = 1
                C_vein_dilate_judge = np.zeros(arter.shape, np.uint8)
                C_vein_dilate_judge[
                    (C_arter_dilate[:, :] == 255) & (image_color_copy_vein[:, :] == 255)] = 1

                # BUGFIX: the original tested np.mean(VeinProb2[C_vein == 255]),
                # i.e. stale variables left over from the vein loop (NameError
                # when that loop never entered its if-branch).  The parallel
                # implementation in AVclassifiation.py confirms the intended
                # test is this segment's own artery confidence.
                if (len(np.unique(C_arter_dilate_judge)) == 1) & (
                        len(np.unique(C_vein_dilate_judge)) != 1) & (np.mean(ArteryProb2[C_arter == 255]) < 0.5):
                    image_color[
                        (C_arter[:, :] == 255) & (image_color[:, :, 0] == 255)] = [0,
                                                                                   0,
                                                                                   255]

        Image.fromarray(image_color).save(os.path.join(out_path, f'{image_basename[ImgNumber].split(".")[0]}.png'))
272
+
273
+
274
+
275
def AVclassifiationMetrics_skeletonPixles(PredAll1,PredAll2,VesselPredAll,LabelAll1,LabelAll2,LabelVesselAll,DataSet=0, onlyMeasureSkeleton=False, strict_mode=True):

    """
    Compute artery/vein classification metrics, pixel-wise and/or on the
    vessel skeleton.  Artery is treated as the positive class, vein as the
    negative class; pixels where the label marks both artery and vein
    (crossings) are excluded from FP/FN counts.

    PredAll1: prediction probabilities of artery, shape (N, 1, H, W)
    PredAll2: prediction probabilities of vein, shape (N, 1, H, W)
    VesselPredAll: prediction probabilities of vessel, shape (N, 1, H, W)
    LabelAll1: artery labels, shape (N, 1, H, W)
    LabelAll2: vein labels, shape (N, 1, H, W)
    LabelVesselAll: vessel labels, shape (N, 1, H, W)
    DataSet: number of images to evaluate
    onlyMeasureSkeleton: if True, return only skeleton metrics
        (acc, spec, sen, f1, dice, iou, bad_case_index);
        otherwise return pixel-wise metrics (acc, spec, sen, f1, dice, iou)
    strict_mode: if True, evaluate on the ground-truth vessel mask with
        fixed 0.5 thresholds; otherwise use the predicted vessel region
    """


    ImgN = DataSet

    # Pixel-wise per-image metric accumulators.
    senList = []
    specList = []
    accList = []
    f1List = []
    ioulist = []
    diceList = []

    # Skeleton per-image metric accumulators.
    senList_sk = []
    specList_sk = []
    accList_sk = []
    f1List_sk = []
    ioulist_sk = []
    diceList_sk = []

    # Images whose skeleton contains no countable pixels at all.
    bad_case_count = 0
    bad_case_index = []

    for ImgNumber in range(ImgN):

        height, width = PredAll1.shape[2:4]

        VesselProb = VesselPredAll[ImgNumber, 0,:,:]
        VesselLabel = LabelVesselAll[ImgNumber, 0, :, :]

        ArteryLabel = LabelAll1[ImgNumber, 0, :, :]
        VeinLabel = LabelAll2[ImgNumber, 0, :, :]

        ArteryProb = PredAll1[ImgNumber, 0,:,:]
        VeinProb = PredAll2[ImgNumber, 0,:,:]

        if strict_mode:
            # Evaluate inside the ground-truth vessel region.
            VesselSeg = VesselLabel
        else:
            # Evaluate inside the predicted (post-processed) vessel region.
            VesselSeg = (VesselProb >= 0.1) & ((ArteryProb >= 0.2) | (VeinProb >= 0.2))
            VesselSeg= binaryPostProcessing3(VesselSeg, removeArea=100, fillArea=20)

        vesselPixels = np.where(VesselSeg>0)

        # Keep artery/vein probabilities only at vessel pixels.
        ArteryProb2 = np.zeros((height,width))
        VeinProb2 = np.zeros((height,width))

        for i in range(len(vesselPixels[0])):
            row = vesselPixels[0][i]
            col = vesselPixels[1][i]
            probA = ArteryProb[row, col]
            probV = VeinProb[row, col]
            ArteryProb2[row, col] = probA
            VeinProb2[row, col] = probV

        # Restrict labels to the evaluation region as well.
        ArteryLabelImg2= ArteryLabel.copy()
        VeinLabelImg2= VeinLabel.copy()
        ArteryLabelImg2 [VesselSeg == 0] = 0
        VeinLabelImg2 [VesselSeg == 0] = 0
        ArteryVeinLabelImg = np.zeros((height, width,3), np.uint8)
        ArteryVeinLabelImg[ArteryLabelImg2>0] = (255, 0, 0)
        ArteryVeinLabelImg[VeinLabelImg2>0] = (0, 0, 255)
        # Pixels labeled as both artery and vein (crossings).
        ArteryVeinLabelCommon = np.bitwise_and(ArteryLabelImg2>0, VeinLabelImg2>0)

        if strict_mode:
            ArteryPred2 = ArteryProb2 > 0.5
            VeinPred2 = VeinProb2 >= 0.5
        else:
            ArteryPred2 = (ArteryProb2 > 0.2) & (ArteryProb2>VeinProb2)
            VeinPred2 = (VeinProb2 >= 0.2) & (ArteryProb2<VeinProb2)

        ArteryPred2= binaryPostProcessing3(ArteryPred2, removeArea=100, fillArea=20)
        VeinPred2= binaryPostProcessing3(VeinPred2, removeArea=100, fillArea=20)

        TPimg = np.bitwise_and(ArteryPred2>0, ArteryLabelImg2>0)  # labeled artery, predicted artery
        TNimg = np.bitwise_and(VeinPred2>0, VeinLabelImg2>0)  # labeled vein, predicted vein
        FPimg = np.bitwise_and(ArteryPred2>0, VeinLabelImg2>0)  # labeled vein, predicted artery
        FPimg = np.bitwise_and(FPimg, np.bitwise_not(ArteryVeinLabelCommon))  # ...and not in the artery-vein overlap region

        FNimg = np.bitwise_and(VeinPred2>0, ArteryLabelImg2>0)  # labeled artery, predicted vein
        FNimg = np.bitwise_and(FNimg, np.bitwise_not(ArteryVeinLabelCommon))  # ...and not in the artery-vein overlap region


        if not onlyMeasureSkeleton:
            TPa = np.count_nonzero(TPimg)
            TNa = np.count_nonzero(TNimg)
            FPa = np.count_nonzero(FPimg)
            FNa = np.count_nonzero(FNimg)

            sensitivity = TPa/(TPa+FNa)
            specificity = TNa/(TNa + FPa)
            acc = (TPa + TNa) /(TPa + TNa + FPa + FNa)
            f1 = 2*TPa/(2*TPa + FPa + FNa)
            dice = 2*TPa/(2*TPa + FPa + FNa)
            iou = TPa/(TPa + FPa + FNa)

            senList.append(sensitivity)
            specList.append(specificity)
            accList.append(acc)
            f1List.append(f1)
            diceList.append(dice)
            ioulist.append(iou)

        ##################################################################################################
        """Skeleton Performance Measurement"""
        # Local name changed from `Skeleton` to avoid shadowing the
        # module-level Skeleton() helper.
        skeleton_mask = np.uint8(morphology.skeletonize(VesselSeg))

        ArterySkeletonLabel = cv2.bitwise_and(ArteryLabelImg2, ArteryLabelImg2, mask=skeleton_mask)
        VeinSkeletonLabel = cv2.bitwise_and(VeinLabelImg2, VeinLabelImg2, mask=skeleton_mask)

        ArterySkeletonPred = cv2.bitwise_and(ArteryPred2, ArteryPred2, mask=skeleton_mask)
        VeinSkeletonPred = cv2.bitwise_and(VeinPred2, VeinPred2, mask=skeleton_mask)

        skeletonPixles = np.where(skeleton_mask > 0)

        TPa_sk = 0
        TNa_sk = 0
        FPa_sk = 0
        FNa_sk = 0
        for i in range(len(skeletonPixles[0])):
            row = skeletonPixles[0][i]
            col = skeletonPixles[1][i]
            if ArterySkeletonLabel[row, col] == 1 and ArterySkeletonPred[row, col] == 1:
                TPa_sk = TPa_sk + 1
            elif VeinSkeletonLabel[row, col] == 1 and VeinSkeletonPred[row, col] == 1:
                TNa_sk = TNa_sk + 1
            elif ArterySkeletonLabel[row, col] == 1 and VeinSkeletonPred[row, col] == 1\
                    and ArteryVeinLabelCommon[row, col] == 0:
                FNa_sk = FNa_sk + 1
            elif VeinSkeletonLabel[row, col] == 1 and ArterySkeletonPred[row, col] == 1\
                    and ArteryVeinLabelCommon[row, col] == 0:
                FPa_sk = FPa_sk + 1
            else:
                pass

        if (TPa_sk+FNa_sk)==0 and (TNa_sk + FPa_sk)==0 and (TPa_sk + TNa_sk + FPa_sk + FNa_sk)==0:
            bad_case_count += 1
            bad_case_index.append(ImgNumber)
            # BUGFIX: the original fell through and divided by zero on the
            # very counts it just found to be zero.  Skip this image's
            # skeleton metrics instead of crashing.
            continue
        sensitivity_sk = TPa_sk/(TPa_sk+FNa_sk)
        specificity_sk = TNa_sk/(TNa_sk + FPa_sk)
        acc_sk = (TPa_sk + TNa_sk) /(TPa_sk + TNa_sk + FPa_sk + FNa_sk)
        f1_sk = 2*TPa_sk/(2*TPa_sk + FPa_sk + FNa_sk)
        dice_sk = 2*TPa_sk/(2*TPa_sk + FPa_sk + FNa_sk)
        iou_sk = TPa_sk/(TPa_sk + FPa_sk + FNa_sk)

        senList_sk.append(sensitivity_sk)
        specList_sk.append(specificity_sk)
        accList_sk.append(acc_sk)
        f1List_sk.append(f1_sk)
        diceList_sk.append(dice_sk)
        ioulist_sk.append(iou_sk)

    if onlyMeasureSkeleton:
        print('Avg Skeleton Performance:', np.mean(accList_sk), np.mean(senList_sk), np.mean(specList_sk))
        return np.mean(accList_sk), np.mean(specList_sk),np.mean(senList_sk), np.mean(f1List_sk), np.mean(diceList_sk), np.mean(ioulist_sk), bad_case_index
    else:
        print('Avg Pixel-wise Performance:', np.mean(accList), np.mean(senList), np.mean(specList))
        return np.mean(accList), np.mean(specList),np.mean(senList),np.mean(f1List),np.mean(diceList),np.mean(ioulist)
457
+
458
+
459
+
460
if __name__ == '__main__':

    # Smoke test: ProMap_testset.npy holds stacked probability maps with
    # channel layout [artery, vein, vessel] per image (N, 3, H, W).
    # NOTE(review): the absolute Windows path and the fixed range(20)
    # basename list are developer-local — adjust before reuse.
    pro_path = r'F:\dw\RIP-AV\AV\log\DRIVE\running_result\ProMap_testset.npy'
    ps = np.load(pro_path)
    AVclassifiation(r'./', ps[:, 0:1, :, :], ps[:, 1:2, :, :], ps[:, 2:, :, :], DataSet=ps.shape[0], image_basename=[str(i)+'.png' for i in range(20)])
AV/Tools/BGR2RGB.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+
3
+ import cv2
4
+
5
def BGR2RGB(Image):
    """Convert an OpenCV-style BGR image to RGB channel order.

    :param Image: BGR image array (as produced by cv2.imread).
    :return: RGBImage - the image with channels reordered to RGB.
    """
    RGBImage = cv2.cvtColor(Image, cv2.COLOR_BGR2RGB)
    return RGBImage
15
+
16
+
17
def RGB2BGR(Image):
    """Convert an RGB image back to OpenCV's BGR channel order.

    :param Image: RGB image array.
    :return: BGRImage - the image with channels reordered to BGR.
    """
    BGRImage = cv2.cvtColor(Image, cv2.COLOR_RGB2BGR)
    return BGRImage
AV/Tools/BinaryPostProcessing.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+
3
+ import numpy as np
4
+ from skimage import morphology, measure
5
+ from AV.Tools.Remove_small_holes import remove_small_holes
6
+ import scipy.ndimage.morphology as scipyMorphology
7
+
8
+
9
def binaryPostProcessing(BinaryImage, removeArea):
    """Clean up a binary segmentation mask.

    Drops connected components smaller than ``removeArea``, closes the
    result with a disk(3) structuring element, then fills holes below
    50 pixels.

    :param BinaryImage: binary segmentation array; normalized in place to 0/1.
    :param removeArea: minimum component area (in pixels) to keep.
    :return: Img_BW - cleaned uint8 (0/1) mask.
    """
    BinaryImage[BinaryImage > 0] = 1

    cleaned = BinaryImage.copy()
    labelled = measure.label(cleaned)
    # Zero out every labelled blob whose area is under the threshold.
    for idx, blob in enumerate(measure.regionprops(labelled)):
        if blob.area < removeArea:
            cleaned[labelled == idx + 1] = 0

    cleaned = morphology.binary_closing(cleaned, morphology.disk(3))
    cleaned = remove_small_holes(cleaned, 50)
    return np.uint8(cleaned)
43
+
44
+
45
+ ################Three parameters
46
def binaryPostProcessing3(BinaryImage, removeArea, fillArea):
    """Clean a binary mask: remove small blobs, close, and fill small holes.

    :param BinaryImage: binary input mask; normalized in place to 0/1.
    :param removeArea: minimum blob area (pixels) to keep.
    :param fillArea: maximum hole area (pixels) that gets filled.
    :return: Img_BW - cleaned uint8 (0/1) mask.
    """
    BinaryImage[BinaryImage > 0] = 1

    cleaned = BinaryImage.copy()
    labelled = measure.label(cleaned)
    for idx, blob in enumerate(measure.regionprops(labelled)):
        if blob.area < removeArea:
            cleaned[labelled == idx + 1] = 0

    cleaned = morphology.binary_closing(cleaned, morphology.square(3))

    # Fill only the holes smaller than fillArea: compare against the fully
    # filled version and add back just the small difference regions.
    filled_all = scipyMorphology.binary_fill_holes(cleaned)
    holes = np.uint8(filled_all) - np.uint8(cleaned)
    hole_labels = measure.label(holes)
    small_holes = np.zeros(cleaned.shape)
    for idx, blob in enumerate(measure.regionprops(hole_labels)):
        if blob.area < fillArea:
            small_holes[hole_labels == idx + 1] = 1
    cleaned[small_holes > 0] = 1

    return np.uint8(cleaned)
88
+
89
+
90
def removeSmallBLobs(BinaryImage, removeArea):
    """Remove connected components smaller than ``removeArea`` pixels.

    :param BinaryImage: binary mask; normalized in place to 0/1.
    :param removeArea: minimum component area (pixels) to keep.
    :return: uint8 (0/1) mask with small blobs removed.
    """
    BinaryImage[BinaryImage > 0] = 1

    kept = BinaryImage.copy()
    labelled = measure.label(kept)
    for idx, blob in enumerate(measure.regionprops(labelled)):
        if blob.area < removeArea:
            kept[labelled == idx + 1] = 0
    return np.uint8(kept)
108
+
AV/Tools/FakePad.py ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+
3
+ from __future__ import division
4
+
5
+ import cv2
6
+ import numpy as np
7
+ from skimage import morphology
8
+ np.seterr(divide='ignore', invalid='ignore')
9
+
10
+ """This is the profiled code, very fast, takes 0.25s"""
11
def fakePad(Image, Mask, iterations=50):
    """
    Add an extra padding ring around the field-of-view mask by repeatedly
    dilating the mask one pixel at a time and filling each newly added ring
    with the average of its already-filled 3x3 neighbours.

    :param Image: RGB image (H, W, 3) or single-channel image (H, W).
    :param Mask: field-of-view mask; nonzero marks the valid region.
    :param iterations: number of one-pixel dilation/fill passes (default 50).
    :return: DilatedImg - the image extended ``iterations`` pixels beyond
        the original mask border.
    """

    if len(Image.shape) == 3: ##for RGB Image
        """for RGB Images"""

        Mask0 = Mask.copy()
        height, width = Mask0.shape[:2]
        # Clear the one-pixel image border so dilation cannot reach the edge.
        Mask0[0, :] = 0  # np.zeros(width)
        Mask0[-1, :] = 0  # np.zeros(width)
        Mask0[:, 0] = 0  # np.zeros(height)
        Mask0[:, -1] = 0  # np.zeros(height)

        # Erodes the mask to avoid weird region near the border.
        structureElement1 = morphology.disk(5)
        Mask0 = cv2.morphologyEx(Mask0, cv2.MORPH_ERODE, structureElement1, iterations=1)

        # DilatedImg = Img_green_reverse * Mask
        DilatedImg = cv2.bitwise_and(Image, Image, mask=Mask0)
        OldMask = Mask0.copy()

        # 3x3 neighbourhood offsets (-1..1) relative to each new ring pixel.
        filter = np.ones((3, 3))
        filterRows, filterCols = np.where(filter > 0)
        filterRows = filterRows - 1
        filterCols = filterCols - 1

        structureElement2 = morphology.diamond(1)
        for i in range(0, iterations):
            # Grow the mask by one pixel; the ring NewMask - OldMask holds
            # the pixels that must be filled on this pass.
            NewMask = cv2.morphologyEx(OldMask, cv2.MORPH_DILATE, structureElement2, iterations=1)
            pixelIndex = np.where(NewMask - OldMask)  # [rows, cols]
            imgValues = np.zeros((len(pixelIndex[0]), len(filterRows), 3))
            for k in range(len(filterRows)):
                filterRowIndexes = pixelIndex[0] - filterRows[k]
                filterColIndexes = pixelIndex[1] - filterCols[k]

                # Keep neighbours that are inside the image and already
                # inside the old (filled) mask.
                selectMask0 = np.bitwise_and(np.bitwise_and(filterRowIndexes < height, filterRowIndexes >= 0),
                                             np.bitwise_and(filterColIndexes < width, filterColIndexes >= 0))
                selectMask1 = OldMask[filterRowIndexes[selectMask0], filterColIndexes[selectMask0]] > 0
                selectedPositions = [filterRowIndexes[selectMask0][selectMask1],
                                     filterColIndexes[selectMask0][selectMask1]]
                imgValues[np.arange(len(pixelIndex[0]))[selectMask0][selectMask1], k, :] = DilatedImg[
                    selectedPositions[0],
                    selectedPositions[1], :]

            # Average of the non-zero neighbour samples per ring pixel.
            # NOTE(review): floor division here but true division in the
            # grey-channel branch below - confirm the asymmetry is intended.
            DilatedImg[pixelIndex[0], pixelIndex[1], :] = np.sum(imgValues, axis=1) // np.sum(imgValues > 0, axis=1)

            OldMask = NewMask

        return DilatedImg

    ########################################################################

    else: #for green channel only
        """for green channel only"""

        Mask0 = Mask.copy()
        height, width = Mask0.shape
        # Clear the one-pixel image border so dilation cannot reach the edge.
        Mask0[0, :] = 0  # np.zeros(width)
        Mask0[-1, :] = 0  # np.zeros(width)
        Mask0[:, 0] = 0  # np.zeros(height)
        Mask0[:, -1] = 0  # np.zeros(height)

        # Erodes the mask to avoid weird region near the border.
        structureElement1 = morphology.disk(5)
        Mask0 = cv2.morphologyEx(Mask0, cv2.MORPH_ERODE, structureElement1, iterations=1)

        # DilatedImg = Img_green_reverse * Mask
        DilatedImg = cv2.bitwise_and(Image, Image, mask=Mask0)

        OldMask = Mask0.copy()

        # 3x3 neighbourhood offsets (-1..1) relative to each new ring pixel.
        filter = np.ones((3, 3))
        filterRows, filterCols = np.where(filter > 0)
        filterRows = filterRows - 1
        filterCols = filterCols - 1

        structureElement2 = morphology.diamond(1)
        for i in range(0, iterations):
            NewMask = cv2.morphologyEx(OldMask, cv2.MORPH_DILATE, structureElement2, iterations=1)
            pixelIndex = np.where(NewMask - OldMask)  # [rows, cols]

            imgValues = np.zeros((len(pixelIndex[0]), len(filterRows)))
            for k in range(len(filterRows)):
                filterRowIndexes = pixelIndex[0] - filterRows[k]
                filterColIndexes = pixelIndex[1] - filterCols[k]

                selectMask0 = np.bitwise_and(np.bitwise_and(filterRowIndexes < height, filterRowIndexes >= 0),
                                             np.bitwise_and(filterColIndexes < width, filterColIndexes >= 0))
                selectMask1 = OldMask[filterRowIndexes[selectMask0], filterColIndexes[selectMask0]] > 0
                selectedPositions = [filterRowIndexes[selectMask0][selectMask1], filterColIndexes[selectMask0][selectMask1]]
                imgValues[np.arange(len(pixelIndex[0]))[selectMask0][selectMask1], k] = DilatedImg[selectedPositions[0], selectedPositions[1]]

            DilatedImg[pixelIndex[0], pixelIndex[1]] = np.sum(imgValues, axis=1) / np.sum(imgValues > 0, axis=1)

            OldMask = NewMask

        return DilatedImg
114
+
115
+
AV/Tools/Float2Uint.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+
3
+ import numpy as np
4
+
5
def float2Uint(Image_float):
    """Linearly rescale a float image into the uint8 range [0, 255].

    :param Image_float: float-valued image array.
    :return: LnGray - uint8 array; the minimum maps to 0 and the maximum
        to (just under) 255.
    """
    lo = np.min(Image_float)
    hi = np.max(Image_float)
    # The 1e-6 term guards against division by zero on a constant image.
    scaled = 255 * ((Image_float - lo) / float(hi - lo + 1e-6))
    return np.array(scaled, dtype=np.uint8)
AV/Tools/Hemelings_eval.py ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from skimage.morphology import skeletonize, erosion
3
+ from sklearn.metrics import f1_score, accuracy_score,classification_report,confusion_matrix
4
+ import cv2
5
+ from Tools.BinaryPostProcessing import binaryPostProcessing3
6
+
7
def evaluation_code(prediction, groundtruth, mask=None, use_mask=False):
    '''
    Evaluate the performance of AV predictions against a given ground truth.

    - prediction: image array [H, W, 3] with arteries in red, veins in blue
    - groundtruth: same layout as prediction
    - mask / use_mask: optional uint8 region-of-interest mask applied to both

    Returns [metrics1, metrics2, metrics3, metrics4, detection_rate]:
    metrics1..3 are [accuracy, weighted F1] over the full image, the
    discovered centerline pixels, and centerline pixels of vessels wider
    than two pixels; metrics4 is [accuracy, weighted F1, sens, spec] over
    all ground-truth centerline pixels; detection_rate is the fraction of
    ground-truth vessel pixels assigned the correct class.
    '''
    encoded_pred = np.zeros(prediction.shape[:2], dtype=int)
    encoded_gt = np.zeros(groundtruth.shape[:2], dtype=int)

    # Convert white pixels to green pixels (which are ignored).
    # BUG FIX: np.logical_and was previously called with three positional
    # arguments; NumPy treats the third one as the `out` array, so the blue
    # channel was silently ignored when detecting white pixels.
    white_ind = np.where((groundtruth[:, :, 0] == 255)
                         & (groundtruth[:, :, 1] == 255)
                         & (groundtruth[:, :, 2] == 255))
    if white_ind[0].size != 0:
        groundtruth[white_ind] = [0, 255, 0]

    # Encode both images for sklearn metrics: 1 = arteriole, 2 = venule.
    arteriole = np.where(np.logical_and(groundtruth[:, :, 0] == 255, groundtruth[:, :, 1] == 0))
    encoded_gt[arteriole] = 1
    venule = np.where(np.logical_and(groundtruth[:, :, 2] == 255, groundtruth[:, :, 1] == 0))
    encoded_gt[venule] = 2
    arteriole = np.where(prediction[:, :, 0] == 255)
    encoded_pred[arteriole] = 1
    venule = np.where(prediction[:, :, 2] == 255)
    encoded_pred[venule] = 2

    # Optionally restrict everything to the disc/cup mask.
    if use_mask:
        groundtruth = cv2.bitwise_and(groundtruth, groundtruth, mask=mask)
        prediction = cv2.bitwise_and(prediction, prediction, mask=mask)
        encoded_pred = cv2.bitwise_and(encoded_pred, encoded_pred, mask=mask)
        encoded_gt = cv2.bitwise_and(encoded_gt, encoded_gt, mask=mask)

    # Centerline pixels present in the prediction.
    center = np.where(np.logical_and(
        np.logical_or((skeletonize(groundtruth[:, :, 0] > 0)), (skeletonize(groundtruth[:, :, 2] > 0))),
        encoded_pred[:, :] > 0))
    encoded_pred_center = encoded_pred[center]
    encoded_gt_center = encoded_gt[center]

    # All centerline pixels in the ground truth.
    center_comp = np.where(
        np.logical_or(skeletonize(groundtruth[:, :, 0] > 0), skeletonize(groundtruth[:, :, 2] > 0)))
    encoded_pred_center_comp = encoded_pred[center_comp]
    encoded_gt_center_comp = encoded_gt[center_comp]

    # Discovered centerline pixels, limited to vessels wider than two
    # pixels (relevant for DRIVE).
    center_eroded = np.where(np.logical_and(
        np.logical_or(skeletonize(erosion(groundtruth[:, :, 0] > 0)), skeletonize(erosion(groundtruth[:, :, 2] > 0))),
        encoded_pred[:, :] > 0))
    encoded_pred_center_eroded = encoded_pred[center_eroded]
    encoded_gt_center_eroded = encoded_gt[center_eroded]

    # Metrics over the full image.
    cur1_acc = accuracy_score(encoded_gt.flatten(), encoded_pred.flatten())
    cur1_F1 = f1_score(encoded_gt.flatten(), encoded_pred.flatten(), average='weighted')
    metrics1 = [cur1_acc, cur1_F1]

    # Metrics over discovered centerline pixels.
    cur2_acc = accuracy_score(encoded_gt_center.flatten(), encoded_pred_center.flatten())
    cur2_F1 = f1_score(encoded_gt_center.flatten(), encoded_pred_center.flatten(), average='weighted')
    metrics2 = [cur2_acc, cur2_F1]

    # Metrics over centerline pixels of vessels wider than two pixels.
    cur3_acc = accuracy_score(encoded_gt_center_eroded.flatten(), encoded_pred_center_eroded.flatten())
    cur3_F1 = f1_score(encoded_gt_center_eroded.flatten(), encoded_pred_center_eroded.flatten(), average='weighted')
    metrics3 = [cur3_acc, cur3_F1]

    # Metrics over all ground-truth centerline pixels.
    cur4_acc = accuracy_score(encoded_gt_center_comp.flatten(), encoded_pred_center_comp.flatten())
    cur4_F1 = f1_score(encoded_gt_center_comp.flatten(), encoded_pred_center_comp.flatten(), average='weighted')

    # Confusion matrix on the ground-truth centerline pixels.
    out = confusion_matrix(encoded_gt_center_comp, encoded_pred_center_comp)
    sens = 0
    if out.shape[0] == 2:
        tn, fp, fn, tp = out.ravel()
    else:
        tn = out[1, 1]
        fp = out[1, 2]
        fn = out[2, 1]
        tp = out[2, 2]
    # NOTE(review): the two names below look swapped relative to the usual
    # sensitivity/specificity definitions; kept as-is so reported numbers
    # remain comparable with earlier runs.
    spec = tp / (tp + fn)
    sens = tn / (fp + tn)
    metrics4 = [cur4_acc, cur4_F1, sens, spec]

    # Finally, compute the vessel detection rate.
    vessel_ind = np.where(encoded_gt > 0)
    vessel_gt = encoded_gt[vessel_ind]
    vessel_pred = encoded_pred[vessel_ind]
    detection_rate = accuracy_score(vessel_gt.flatten(), vessel_pred.flatten())

    return [metrics1, metrics2, metrics3, metrics4, detection_rate]
AV/Tools/Im2Double.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+
3
+ import numpy as np
4
+
5
def im2double(im):
    """Rescale an integer image to floats in [0, 1] (min -> 0, max -> 1).

    :param im: input image array (e.g. uint8).
    :return: out - float array scaled to the unit interval.
    """
    flat = im.ravel()
    lo = np.min(flat)
    hi = np.max(flat)
    return (im.astype('float') - lo) / (hi - lo)
AV/Tools/ImageResize.py ADDED
@@ -0,0 +1,214 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os.path
2
+
3
+ import cv2
4
+ import numpy as np
5
+ from skimage import measure
6
+
7
def imageResize(Image, downsizeRatio):
    """Downscale an image by ``downsizeRatio`` when the ratio is below 1.

    :param Image: original image array.
    :param downsizeRatio: user-defined scale factor (e.g. 0.75, 0.5, 0.2);
        values >= 1 leave the size unchanged.
    :return: ImgResized - the (possibly) resized image as uint8.
    """
    if downsizeRatio < 1:
        resized = cv2.resize(Image, dsize=None, fx=downsizeRatio, fy=downsizeRatio)
    else:
        resized = Image
    return np.uint8(resized)
20
+
21
+
22
def creatMask(Image, threshold=10):
    """Create the field-of-view (FOV) mask of a fundus image.

    Thresholds the grey level, keeps the largest contour as the FOV, and
    paints everything outside it white in the returned image.

    :param Image: RGB image or single (green) channel image.
    :param threshold: grey-level threshold separating FOV from background.
    :return: (ResultImg, Mask) - image with background whitened, and the
        uint8 FOV mask (1 inside, 0 outside).
    """
    if len(Image.shape) == 3:  # RGB image
        grey = cv2.cvtColor(Image, cv2.COLOR_BGR2GRAY)
        rough = grey >= threshold
    else:  # green channel image
        rough = Image >= threshold

    cvVersion = int(cv2.__version__.split('.')[0])  # kept for parity; unused

    rough = np.uint8(rough)

    # Keep only the largest contour (by area) as the field of view.
    contours, hierarchy = cv2.findContours(rough, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    areas = [cv2.contourArea(c) for c in contours]
    biggest = np.argmax(areas)
    Mask = np.zeros(Image.shape[:2], dtype=np.uint8)
    cv2.drawContours(Mask, contours, biggest, 1, -1)

    ResultImg = Image.copy()
    if len(Image.shape) == 3:
        ResultImg[Mask == 0] = (255, 255, 255)
    else:
        ResultImg[Mask == 0] = 255

    return ResultImg, Mask
54
+
55
def shift_rgb(img, *args):
    """Shift each channel of ``img`` by the corresponding offset in ``args``.

    Builds a clipped 0-255 lookup table per channel and applies it with
    cv2.LUT. A 2-D (grey) input is shifted as a single channel.

    :param img: uint8 image, (H, W, C) or (H, W).
    :param args: one additive offset per channel.
    :return: result_img - the shifted image.
    """
    result_img = np.empty_like(img)
    max_value = 255
    for channel, offset in enumerate(args):
        table = np.arange(0, max_value + 1).astype("float32")
        table += offset
        table = np.clip(table, 0, max_value).astype(img.dtype)
        if len(img.shape) == 2:
            print(f'=========grey image=======')
            result_img = cv2.LUT(img, table)
        else:
            result_img[..., channel] = cv2.LUT(img[..., channel], table)
    return result_img
76
def cropImage_bak(Image, Mask):
    """Crop image and mask to the FOV limits and blank a 20 px border.

    Works on copies, so the caller's arrays are left untouched.

    :param Image: RGB or single-channel image.
    :param Mask: FOV mask matching ``Image``.
    :return: (ImgCropped, MaskCropped, [upperLimit, lowerLimit, leftLimit, rightLimit])
    """
    Image = Image.copy()
    Mask = Mask.copy()

    leftLimit, rightLimit, upperLimit, lowerLimit = getLimit(Mask)

    if len(Image.shape) == 3:
        ImgCropped = Image[upperLimit:lowerLimit, leftLimit:rightLimit, :]
    else:  # len(Image.shape) == 2
        ImgCropped = Image[upperLimit:lowerLimit, leftLimit:rightLimit]
    MaskCropped = Mask[upperLimit:lowerLimit, leftLimit:rightLimit]

    # Blank a 20-pixel frame on every side of both crops.
    ImgCropped[:20] = 0
    ImgCropped[-20:] = 0
    ImgCropped[:, :20] = 0
    ImgCropped[:, -20:] = 0
    MaskCropped[:20, :] = 0
    MaskCropped[-20:, :] = 0
    MaskCropped[:, :20] = 0
    MaskCropped[:, -20:] = 0

    cropLimit = [upperLimit, lowerLimit, leftLimit, rightLimit]
    return ImgCropped, MaskCropped, cropLimit
112
+
113
+
114
+
115
+ ########################################################
116
+ ###new function to get the limit for cropping.
117
+ ###try to get higher speed than np.where, but not working.
118
+
119
def getLimit(Mask):
    """Compute FOV crop limits from a mask, padded by a 50 px margin.

    Rows/columns whose foreground count falls below 1% of the maximum
    count are treated as background.

    :param Mask: mask array; nonzero marks the foreground.
    :return: (leftLimit, rightLimit, upperLimit, lowerLimit) clipped to the
        mask bounds.
    """
    fg = Mask > 0
    perRow = np.sum(fg, axis=1)   # foreground pixels per row
    perCol = np.sum(fg, axis=0)   # foreground pixels per column

    goodRows = np.where(perRow >= 0.01 * np.max(perRow))[0]
    goodCols = np.where(perCol >= 0.01 * np.max(perCol))[0]

    margin = 50
    leftLimit = np.clip(np.min(goodCols) - margin, 0, Mask.shape[1])
    rightLimit = np.clip(np.max(goodCols) + margin, 0, Mask.shape[1])
    upperLimit = np.clip(np.min(goodRows) - margin, 0, Mask.shape[0])
    lowerLimit = np.clip(np.max(goodRows) + margin, 0, Mask.shape[0])

    return leftLimit, rightLimit, upperLimit, lowerLimit
143
+
144
+
145
+
146
+
147
+
148
def cropImage(Image, Mask):
    """Crop the field of view based on the mask (+50 px margin).

    NOTE(review): operates on the arrays in place - the 20 px border of the
    caller's ``Image``/``Mask`` is zeroed through the returned views.

    :param Image: resized RGB or single-channel image.
    :param Mask: FOV mask matching ``Image``.
    :return: (ImgCropped, MaskCropped, [upperLimit, lowerLimit, leftLimit, rightLimit])
    """
    height, width = Image.shape[:2]

    fgRows, fgCols = np.where(Mask > 0)
    minCol, maxCol = fgCols[np.argmin(fgCols)], fgCols[np.argmax(fgCols)]
    minRow, maxRow = fgRows[np.argmin(fgRows)], fgRows[np.argmax(fgRows)]

    upperLimit = np.maximum(0, minRow - 50)
    lowerLimit = np.minimum(maxRow + 50, height)
    leftLimit = np.maximum(0, minCol - 50)
    rightLimit = np.minimum(maxCol + 50, width)

    if len(Image.shape) == 3:
        ImgCropped = Image[upperLimit:lowerLimit, leftLimit:rightLimit, :]
        MaskCropped = Mask[upperLimit:lowerLimit, leftLimit:rightLimit]
        # Blank a 20-pixel frame on every side of both crops.
        ImgCropped[:20, :, :] = 0
        ImgCropped[-20:, :, :] = 0
        ImgCropped[:, :20, :] = 0
        ImgCropped[:, -20:, :] = 0
        MaskCropped[:20, :] = 0
        MaskCropped[-20:, :] = 0
        MaskCropped[:, :20] = 0
        MaskCropped[:, -20:] = 0
    elif len(Image.shape) == 2:
        ImgCropped = Image[upperLimit:lowerLimit, leftLimit:rightLimit]
        MaskCropped = Mask[upperLimit:lowerLimit, leftLimit:rightLimit]
        ImgCropped[:20, :] = 0
        ImgCropped[-20:, :] = 0
        ImgCropped[:, :20] = 0
        ImgCropped[:, -20:] = 0
        MaskCropped[:20, :] = 0
        MaskCropped[-20:, :] = 0
        MaskCropped[:, :20] = 0
        MaskCropped[:, -20:] = 0
    else:
        pass

    cropLimit = [upperLimit, lowerLimit, leftLimit, rightLimit]
    return ImgCropped, MaskCropped, cropLimit
197
+
198
+
199
if __name__ == '__main__':
    # Generate FOV masks for every test image of the AV_DRIVE dataset.
    mask_dir = os.path.join('../data', 'AV_DRIVE', 'test', 'mask')
    if not os.path.exists(mask_dir):
        os.makedirs(mask_dir)

    img_dir = os.path.join('../data', 'AV_DRIVE', 'test', 'images')
    for file in os.listdir(img_dir):
        # Only process recognised image files.
        if file.endswith('.jpg') or file.endswith('.png'):
            img = cv2.imread(os.path.join(img_dir, file))
            _, mask = creatMask(img)
            cv2.imwrite(os.path.join(mask_dir, file), mask)
AV/Tools/Remove_small_holes.py ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+
3
+ import numpy as np
4
+ import functools
5
+ import warnings
6
+ from scipy import ndimage as ndi
7
+ from skimage import morphology
8
+
9
+
10
def remove_small_holes(ar, min_size=64, connectivity=1, in_place=False):
    """Remove contiguous holes smaller than the specified size.

    Works by inverting the array, removing the small "objects" of the
    inverse (which are the holes of the original), and inverting back.

    Parameters
    ----------
    ar : ndarray (arbitrary shape, int or bool type)
        The array containing the connected components of interest.
    min_size : int, optional (default: 64)
        The maximum hole size (in pixels) that gets filled.
    connectivity : int, {1, 2, ..., ar.ndim}, optional (default: 1)
        The connectivity defining the neighborhood of a pixel.
    in_place : bool, optional (default: False)
        If True, operate on the input array itself; otherwise use a copy.

    Returns
    -------
    out : bool ndarray, same shape as input ``ar``
        The input array with small holes within connected components removed.
    """
    work = ar if in_place else ar.copy()

    # Invert so that holes become foreground objects.
    if in_place:
        work = np.logical_not(work, work)
    else:
        work = np.logical_not(work)

    # Drop the small "objects" (i.e. the original small holes).
    work = morphology.remove_small_objects(work, min_size, connectivity, in_place)

    # Invert back to the original polarity.
    if in_place:
        work = np.logical_not(work, work)
    else:
        work = np.logical_not(work)

    return work
AV/Tools/Standardize.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+
3
+ from __future__ import division
4
+ import numpy as np
5
+ import cv2
6
+
7
def standardize(img, mask, wsize):
    """Standardize image values to zero mean / unit variance.

    With ``wsize == 0`` the statistics are computed globally over the
    masked pixels; otherwise a local mean/std over a ``wsize`` x ``wsize``
    box window is used.

    :param img: float-valued input image; modified in place where mask == 0.
    :param mask: binary mask of valid pixels.
    :param wsize: window size; 0 selects global standardization.
    :return: standardized image (0 outside the mask or where the local
        std is 0).
    """
    if wsize == 0:
        simg = globalstandardize(img, mask)
    else:
        img[mask == 0] = 0
        # BUG FIX: cv2.blur expects ksize as a (width, height) tuple;
        # passing the bare int raised an error at call time.
        img_mean = cv2.blur(img, ksize=(wsize, wsize))
        img_squared_mean = cv2.blur(img * img, ksize=(wsize, wsize))
        img_std = np.sqrt(img_squared_mean - img_mean * img_mean)
        simg = (img - img_mean) / img_std
        simg[img_std == 0] = 0
        simg[mask == 0] = 0
    return simg
27
+
28
def globalstandardize(img, mask):
    """Zero-mean / unit-std standardization over the pixels where mask == 1.

    :param img: input image array.
    :param mask: binary mask; only pixels with mask == 1 contribute and
        are written; everything else stays 0.
    :return: simg - standardized image of the same shape.
    """
    used = np.double(img[mask == 1])
    mu = np.mean(used)
    sigma = np.std(used)
    simg = np.zeros(img.shape)
    simg[mask == 1] = (used - mu) / sigma
    return simg
36
+
37
def getmean(x):
    """Return the mean of the non-zero entries of ``x``."""
    return np.mean(x[x != 0])
41
+
42
def getstd(x):
    """Return the standard deviation of the non-zero entries of ``x``."""
    return np.std(x[x != 0])
AV/Tools/__init__.py ADDED
File without changes
AV/Tools/__pycache__/AVclassifiation.cpython-39.pyc ADDED
Binary file (5 kB). View file
 
AV/Tools/__pycache__/AVclassifiationMetrics.cpython-310.pyc ADDED
Binary file (10.1 kB). View file
 
AV/Tools/__pycache__/AVclassifiationMetrics.cpython-39.pyc ADDED
Binary file (5.31 kB). View file
 
AV/Tools/__pycache__/AVclassifiationMetrics_v1.cpython-39.pyc ADDED
Binary file (5.82 kB). View file
 
AV/Tools/__pycache__/BGR2RGB.cpython-39.pyc ADDED
Binary file (548 Bytes). View file
 
AV/Tools/__pycache__/BinaryPostProcessing.cpython-310.pyc ADDED
Binary file (1.96 kB). View file
 
AV/Tools/__pycache__/BinaryPostProcessing.cpython-39.pyc ADDED
Binary file (1.98 kB). View file
 
AV/Tools/__pycache__/ImageResize.cpython-310.pyc ADDED
Binary file (4.58 kB). View file
 
AV/Tools/__pycache__/ImageResize.cpython-39.pyc ADDED
Binary file (4.5 kB). View file
 
AV/Tools/__pycache__/Remove_small_holes.cpython-310.pyc ADDED
Binary file (2.79 kB). View file
 
AV/Tools/__pycache__/Remove_small_holes.cpython-39.pyc ADDED
Binary file (2.82 kB). View file
 
AV/Tools/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (126 Bytes). View file
 
AV/Tools/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (129 Bytes). View file
 
AV/Tools/__pycache__/data_augmentation.cpython-310.pyc ADDED
Binary file (2.52 kB). View file
 
AV/Tools/__pycache__/data_augmentation.cpython-39.pyc ADDED
Binary file (2.57 kB). View file
 
AV/Tools/__pycache__/evalution_vessel.cpython-310.pyc ADDED
Binary file (1.46 kB). View file
 
AV/Tools/__pycache__/global2patch_AND_patch2global.cpython-39.pyc ADDED
Binary file (3.3 kB). View file
 
AV/Tools/__pycache__/utils.cpython-310.pyc ADDED
Binary file (3.06 kB). View file
 
AV/Tools/__pycache__/utils.cpython-39.pyc ADDED
Binary file (3.06 kB). View file
 
AV/Tools/__pycache__/utils_test.cpython-39.pyc ADDED
Binary file (10.3 kB). View file
 
AV/Tools/__pycache__/warmup.cpython-39.pyc ADDED
Binary file (2.91 kB). View file
 
AV/Tools/centerline_evaluation.py ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import cv2
3
+ import os
4
+ import natsort
5
+ from Tools.Hemelings_eval import evaluation_code
6
+ import pandas as pd
7
+
8
def getFolds(ImgPath, LabelPath, k_fold_idx, k_fold, trainset=True):
    """Collect image/label filename lists, optionally split into k folds.

    :param ImgPath: directory containing the images.
    :param LabelPath: directory containing the labels.
    :param k_fold_idx: index of the held-out fold. Currently unused: the
        fold-skipping logic is commented out, so every fold lands in both
        the train and test lists (preserved behavior).
    :param k_fold: number of folds; <= 0 yields empty lists.
    :param trainset: when False, return the test-set lists instead.
    :return: (image filename list, label filename list)
    """
    print(f'ImgPath: {ImgPath}')
    print(f'LabelPath: {LabelPath}')
    print(f'k_fold_idx: {k_fold_idx}')
    print(f'k_fold: {k_fold}')
    print(f'trainset: {trainset}')

    # Grab only the top-level filenames of each directory.
    for dirpath, dirnames, filenames in os.walk(ImgPath):
        ImgDirAll = filenames
        break
    for dirpath, dirnames, filenames in os.walk(LabelPath):
        LabelDirAll = filenames
        break

    ImgDir = []
    LabelDir = []
    ImgDir_testset = []
    LabelDir_testset = []

    if k_fold > 0:
        ImgDirAll = natsort.natsorted(ImgDirAll)
        LabelDirAll = natsort.natsorted(LabelDirAll)
        num_fold = len(ImgDirAll) // k_fold
        for i in range(k_fold):
            start_idx = i * num_fold
            end_idx = (i + 1) * num_fold
            # if i == k_fold_idx:
            ImgDir_testset.extend(ImgDirAll[start_idx:end_idx])
            LabelDir_testset.extend(LabelDirAll[start_idx:end_idx])
            # continue
            ImgDir.extend(ImgDirAll[start_idx:end_idx])
            LabelDir.extend(LabelDirAll[start_idx:end_idx])

    if not trainset:
        return ImgDir_testset, LabelDir_testset
    # BUG FIX: previously returned (ImgDir, ImgDir), dropping the label list.
    return ImgDir, LabelDir
43
+
44
def centerline_eval(ProMap, config):
    """Evaluate AV probability maps with the centerline-based protocol.

    For each image in ``ProMap`` (shape (N, 3, H, W): artery / vein /
    vessel probabilities), builds a hard red/blue AV segmentation over the
    ground-truth vessel pixels, scores it with ``evaluation_code`` and
    averages the metrics over all images.

    :param ProMap: probability maps, one triplet per test image.
    :param config: experiment config; must provide ``dataset_name`` and,
        for 'hrf', also ``trainset_path``, ``k_fold_idx`` and ``k_fold``.
    :return: filewriter - a printable report string with all averages.
    """
    # Resolve the ground-truth label directory per dataset.
    if config.dataset_name == 'hrf':
        ImgPath = os.path.join(config.trainset_path, 'test', 'images')
        LabelPath = os.path.join(config.trainset_path, 'test', 'ArteryVein_0410_final')
    elif config.dataset_name == 'DRIVE':
        dataroot = './data/AV_DRIVE/test'
        LabelPath = os.path.join(dataroot, 'av')
    else:  # if config.dataset_name == 'INSPIRE':
        dataroot = './data/INSPIRE_AV'
        LabelPath = os.path.join(dataroot, 'label')
        # Optic-disc parameters used to mask out the disc region for INSPIRE.
        DF_disc = pd.read_excel('./Tools/DiskParameters_INSPIRE_resize.xls', sheet_name=0)
    if config.dataset_name == 'hrf':
        k_fold_idx = config.k_fold_idx
        k_fold = config.k_fold

        ImgList0, LabelList0 = getFolds(ImgPath, LabelPath, k_fold_idx, k_fold, trainset=False)
    # Accumulators: three [acc, F1] pairs, one [acc, F1, sens, spec]
    # quadruple, and a scalar detection rate (matches evaluation_code).
    overall_value = [[0, 0] for i in range(3)]
    overall_value.append([0, 0, 0, 0])
    overall_value.append(0)
    img_num = ProMap.shape[0]
    for i in range(img_num):
        arteryImg = ProMap[i, 0, :, :]
        veinImg = ProMap[i, 1, :, :]
        vesselImg = ProMap[i, 2, :, :]

        # Dataset-specific ground-truth filename for image i.
        idx = str(i + 1)
        idx = idx.zfill(2)
        if config.dataset_name == 'hrf':
            imgName = ImgList0[i]
        elif config.dataset_name == 'DRIVE':
            imgName = idx + '_test.png'
        else:
            imgName = 'image' + str(i + 1) + '_ManualAV.png'
        gt_path = os.path.join(LabelPath, imgName)
        print(gt_path)
        gt = cv2.imread(gt_path)
        gt = cv2.cvtColor(gt, cv2.COLOR_BGR2RGB)
        if config.dataset_name == 'hrf':
            gt = cv2.resize(gt, (1200, 800))
        # Vessel = red channel (artery) + blue channel (vein).
        gt_vessel = gt[:, :, 0] + gt[:, :, 2]

        h, w = arteryImg.shape

        ArteryPred = np.float32(arteryImg)
        VeinPred = np.float32(veinImg)
        VesselPred = np.float32(vesselImg)
        AVSeg1 = np.zeros((h, w, 3))
        vesselSeg = np.zeros((h, w))
        th = 0
        # Classify only the ground-truth vessel pixels (not the predicted
        # vessel mask - see the commented alternative).
        vesselPixels = np.where(gt_vessel)  # (VesselPred>th) #

        for k in np.arange(len(vesselPixels[0])):
            row = vesselPixels[0][k]
            col = vesselPixels[1][k]
            # Higher artery probability -> red; otherwise -> blue (vein).
            if ArteryPred[row, col] >= VeinPred[row, col]:
                AVSeg1[row, col] = (255, 0, 0)
            else:
                AVSeg1[row, col] = (0, 0, 255)

        AVSeg1 = np.float32(AVSeg1)
        AVSeg1 = np.uint8(AVSeg1)

        if config.dataset_name == 'INSPIRE':
            # Exclude the optic-disc region from the INSPIRE evaluation.
            discCenter = (DF_disc.loc[i, 'DiskCenterRow'], DF_disc.loc[i, 'DiskCenterCol'])
            discRadius = DF_disc.loc[i, 'DiskRadius']
            MaskDisc = np.ones((h, w), np.uint8)
            cv2.circle(MaskDisc, center=(discCenter[1], discCenter[0]), radius=discRadius, color=0, thickness=-1)
            out = evaluation_code(AVSeg1, gt, mask=MaskDisc, use_mask=True)
        else:
            out = evaluation_code(AVSeg1, gt)

        # Accumulate: index 4 is the scalar detection rate, index 3 the
        # 4-element metric list, indices 0..2 are [acc, F1] pairs.
        for j in range(len(out)):
            if j == 4:
                overall_value[j] += out[j]
                continue
            if j == 3:
                overall_value[j][0] += out[j][0]
                overall_value[j][1] += out[j][1]
                overall_value[j][2] += out[j][2]
                overall_value[j][3] += out[j][3]
                continue

            overall_value[j][0] += out[j][0]
            overall_value[j][1] += out[j][1]

    # print("overall_value:", overall_value)
    # Turn the sums into per-image averages.
    for j in range(len(overall_value)):
        if j == 4:
            overall_value[j] /= img_num
            continue
        if j == 3:
            overall_value[j][0] /= img_num
            overall_value[j][1] /= img_num
            overall_value[j][2] /= img_num
            overall_value[j][3] /= img_num
            continue
        overall_value[j][0] /= img_num
        overall_value[j][1] /= img_num
    # Render the report both to stdout and into the returned string.
    metrics_names = ['full image', 'discovered centerline pixels', 'vessels wider than two pixels', 'all centerline', 'vessel detection rate']
    filewriter = ""
    print("--------------------------Centerline---------------------------------")
    filewriter += "--------------------------Centerline---------------------------------\n"
    for j in range(len(overall_value)):
        if j == 4:
            print("{} - Ratio:{}".format(metrics_names[j], overall_value[j]))
            filewriter += "{} - Ratio:{}\n".format(metrics_names[j], overall_value[j])
            continue
        if j == 3:
            print("{} - Acc: {} , F1:{}, Sens:{}, Spec:{}".format(metrics_names[j], overall_value[j][0], overall_value[j][1], overall_value[j][2], overall_value[j][3]))
            filewriter += "{} - Acc: {} , F1:{}, Sens:{}, Spec:{}\n".format(metrics_names[j], overall_value[j][0], overall_value[j][1], overall_value[j][2], overall_value[j][3])
            continue

        print("{} - Acc: {} , F1:{}".format(metrics_names[j], overall_value[j][0], overall_value[j][1]))
        filewriter += "{} - Acc: {} , F1:{}\n".format(metrics_names[j], overall_value[j][0], overall_value[j][1])

    return filewriter
AV/Tools/data_augmentation.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import tensorlayer as tl
3
+
4
def data_augmentation1_5(*args):
    """Jointly rotate all inputs by a random angle in [-180, 180] degrees.

    Every argument is an (H, W, C) array; the same random transform is
    applied to all of them so image/label pairs stay aligned.
    Returns a squeezed float32 array stacking the transformed inputs.
    """
    rotated = tl.prepro.rotation_multi(args, rg=180, is_random=True,
                                       fill_mode='reflect')
    return np.squeeze(rotated).astype(np.float32)
11
+
12
def data_augmentation3_5(*args):
    """Jointly shift all inputs by a random offset (up to 10% of H and W).

    Same contract as data_augmentation1_5: identical transform for every
    (H, W, C) argument, squeezed float32 result.
    """
    shifted = tl.prepro.shift_multi(args, wrg=0.10, hrg=0.10, is_random=True,
                                    fill_mode='reflect')
    return np.squeeze(shifted).astype(np.float32)
19
+
20
def data_augmentation4_5(*args):
    """Jointly apply a random swirl distortion to all inputs.

    Same contract as data_augmentation1_5: identical transform for every
    (H, W, C) argument, squeezed float32 result.
    """
    swirled = tl.prepro.swirl_multi(args, is_random=True)
    return np.squeeze(swirled).astype(np.float32)
26
+
27
def data_augmentation2_5(*args):
    """Jointly zoom all inputs by a random factor in [0.5, 2.5].

    Same contract as data_augmentation1_5: identical transform for every
    (H, W, C) argument, squeezed float32 result.
    """
    zoomed = tl.prepro.zoom_multi(args, zoom_range=[0.5, 2.5], is_random=True,
                                  fill_mode='reflect')
    return np.squeeze(zoomed).astype(np.float32)
34
+
35
def data_aug5_old(data_mat, label_mat, label_data_centerness, choice):
    """Apply one paired augmentation to image, label, and centerness maps.

    All three inputs are (C, H, W) arrays. They are moved to (H, W, C)
    layout for the transforms and moved back before returning, so the
    exact same spatial transform hits all three maps.

    choice: 0 identity, 1 horizontal flip, 2 vertical flip, 3 rotation,
    4 zoom, 5 shift, 6 swirl. Any other value acts as identity.
    """
    trio = [np.transpose(m, (1, 2, 0))
            for m in (data_mat, label_mat, label_data_centerness)]

    if choice == 1:
        trio = [np.fliplr(m) for m in trio]
    elif choice == 2:
        trio = [np.flipud(m) for m in trio]
    elif choice == 3:
        trio = list(data_augmentation1_5(*trio))
    elif choice == 4:
        trio = list(data_augmentation2_5(*trio))
    elif choice == 5:
        trio = list(data_augmentation3_5(*trio))
    elif choice == 6:
        trio = list(data_augmentation4_5(*trio))

    # Back to (C, H, W).
    data_mat, label_mat, label_data_centerness = (
        np.transpose(m, (2, 0, 1)) for m in trio)

    return data_mat, label_mat, label_data_centerness
70
+
71
+ # data augmentation for variable number of input
72
def data_aug5(*args, choice):
    """Variable-arity version of data_aug5_old.

    Each positional argument is a (C, H, W) array; the same augmentation
    (selected by the keyword-only 'choice', same codes as data_aug5_old)
    is applied to all of them. Returns a tuple of (C, H, W) arrays.
    """
    hwc = [np.transpose(a, (1, 2, 0)) for a in args]

    if choice == 1:
        hwc = [np.fliplr(a) for a in hwc]
    elif choice == 2:
        hwc = [np.flipud(a) for a in hwc]
    elif choice == 3:
        hwc = data_augmentation1_5(*hwc)
    elif choice == 4:
        hwc = data_augmentation2_5(*hwc)
    elif choice == 5:
        hwc = data_augmentation3_5(*hwc)
    elif choice == 6:
        hwc = data_augmentation4_5(*hwc)

    return tuple(np.transpose(a, (2, 0, 1)) for a in hwc)
91
+
92
+
93
+
94
+
AV/Tools/evalution_vessel.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ ###################################################
3
+ #
4
+ # Script to
5
+ # - Calculate prediction of the test dataset
6
+ # - Calculate the parameters to evaluate the prediction
7
+ #
8
+ ##################################################
9
+
10
+ #Python
11
+ import numpy as np
12
+ from sklearn.metrics import roc_curve
13
+ from sklearn.metrics import roc_auc_score,f1_score,jaccard_score
14
+ from sklearn.metrics import confusion_matrix
15
+ from sklearn.metrics import precision_recall_curve
16
+
17
+ from lib.extract_patches2 import pred_only_FOV
18
+
19
+
20
def evalue(preImg, gtruth_masks, test_border_masks):
    """Evaluate a binary vessel probability map against the ground truth.

    Only pixels inside the field of view (FOV) are scored.

    Args:
        preImg: predicted probability maps.
        gtruth_masks: binary ground-truth vessel masks.
        test_border_masks: FOV border masks.

    Returns:
        (AUC_ROC, accuracy, specificity, sensitivity, F1_score,
         dice_score, iou_score) as floats.
    """
    # Flatten predictions/labels, keeping only pixels inside the FOV.
    y_scores, y_true = pred_only_FOV(preImg, gtruth_masks, test_border_masks)

    # Area under the ROC curve.
    AUC_ROC = roc_auc_score(y_true, y_scores)

    # Binarize at a fixed threshold (vectorized; this was previously an
    # element-wise Python loop over every pixel).
    threshold_confusion = 0.5
    y_pred = (y_scores >= threshold_confusion).astype(np.float64)

    # NOTE(review): the derived ratios below index confusion as a 2x2
    # matrix; this assumes both classes occur in y_true/y_pred.
    confusion = confusion_matrix(y_true, y_pred)

    accuracy = 0
    if float(np.sum(confusion)) != 0:
        accuracy = float(confusion[0, 0] + confusion[1, 1]) / float(np.sum(confusion))

    specificity = 0
    if float(confusion[0, 0] + confusion[0, 1]) != 0:
        specificity = float(confusion[0, 0]) / float(confusion[0, 0] + confusion[0, 1])

    sensitivity = 0
    if float(confusion[1, 1] + confusion[1, 0]) != 0:
        sensitivity = float(confusion[1, 1]) / float(confusion[1, 1] + confusion[1, 0])

    precision = 0
    if float(confusion[1, 1] + confusion[0, 1]) != 0:
        precision = float(confusion[1, 1]) / float(confusion[1, 1] + confusion[0, 1])

    # F1, IoU (Jaccard) and Dice; Dice is derived from IoU via
    # dice = 2*iou / (1 + iou).
    F1_score = f1_score(y_true, y_pred, labels=None, average='binary', sample_weight=None)
    iou_score = jaccard_score(y_true, y_pred)
    dice_score = 2 * iou_score / (1 + iou_score)

    return AUC_ROC, accuracy, specificity, sensitivity, F1_score, dice_score, iou_score
74
+
75
+
AV/Tools/global2patch_AND_patch2global.py ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- encoding: utf-8 -*-
2
+ # author Victorshengw
3
+
4
+
5
+ import os
6
+ import numpy as np
7
+ from torchvision import transforms
8
+ from torch.autograd import Variable
9
+ import torch
10
+ from PIL import Image
11
+
12
+
13
def get_patch_info(shape, p_size):
    '''
    Compute the patch grid covering an image.

    shape: origin image size, (x, y)
    p_size: patch size (square)
    return: n_x, n_y, step_x, step_y

    BUGFIX: the original divided by (n - 1) / (m - 1) unconditionally,
    which raised ZeroDivisionError whenever only one of the dimensions
    equals (or is smaller than) p_size. A single patch along an axis now
    yields a step of 0 for that axis.
    '''
    x = shape[0]
    y = shape[1]
    if x == p_size and y == p_size:
        return 1, 1, 0, 0

    n = m = 1
    # Grow the grid until the image is covered...
    while x > n * p_size:
        n += 1
    # ...then keep growing while the overlap between neighbouring patches
    # would be less than a quarter patch.
    if n > 1:
        while p_size - 1.0 * (x - p_size) / (n - 1) < p_size / 4:
            n += 1
    while y > m * p_size:
        m += 1
    if m > 1:
        while p_size - 1.0 * (y - p_size) / (m - 1) < p_size / 4:
            m += 1

    step_x = (x - p_size) * 1.0 / (n - 1) if n > 1 else 0
    step_y = (y - p_size) * 1.0 / (m - 1) if m > 1 else 0
    return n, m, step_x, step_y
+
35
+
36
+
37
def global2patch(images, p_size):
    '''
    Split each PIL image into a grid of overlapping p_size patches.

    image/label => patches
    p_size: patch size (h, w)
    return:
        patches: per image, list of PIL patch crops
        coordinates: per image, (top, left) of each patch as fractions of image size
        templates: per image, tensor counting how many patches cover each pixel
        sizes: per image, (h, w)
        ratios: per image, (p_size[0]/h, p_size[1]/w)
    '''
    patches = []; coordinates = []; templates = []; sizes = []; ratios = [(0, 0)] * len(images); patch_ones = np.ones(p_size)
    for i in range(len(images)):
        # PIL .size is (width, height); stored internally as (h, w).
        w, h = images[i].size
        size = (h, w)
        sizes.append(size)
        ratios[i] = (float(p_size[0]) / size[0], float(p_size[1]) / size[1])
        template = np.zeros(size)
        # Grid layout and step come from get_patch_info (>= 1/4-patch overlap).
        n_x, n_y, step_x, step_y = get_patch_info(size, p_size[0])
        patches.append([images[i]] * (n_x * n_y))
        coordinates.append([(0, 0)] * (n_x * n_y))
        for x in range(n_x):
            # Last row/column is anchored to the image border so the grid
            # never reads past the edge.
            if x < n_x - 1: top = int(np.round(x * step_x))
            else: top = size[0] - p_size[0]
            for y in range(n_y):
                if y < n_y - 1: left = int(np.round(y * step_y))
                else: left = size[1] - p_size[1]
                # Coverage count; used later to average overlapping patches.
                template[top:top+p_size[0], left:left+p_size[1]] += patch_ones
                # Coordinates are stored as fractions so they are resolution-independent.
                coordinates[i][x * n_y + y] = (1.0 * top / size[0], 1.0 * left / size[1])
                patches[i][x * n_y + y] = transforms.functional.crop(images[i], top, left, p_size[0], p_size[1])

        # patches[i][x * n_y + y].show()
        templates.append(Variable(torch.Tensor(template).expand(1, 1, -1, -1)))
    return patches, coordinates, templates, sizes, ratios
66
+
67
def patch2global(patches, n_class, sizes, coordinates, p_size, flag=0):
    '''
    Stitch predicted patches (after the classify layer) back into
    full-size prediction maps.

    patches: tensor of patch predictions, indexed like patches[j][c, h, w]
    n_class: number of output channels
    sizes: per image, (h, w) of the full prediction
    coordinates: per image, fractional (top, left) of each patch
    p_size: patch size (h, w)
    flag == 0: each output pixel keeps the element-wise MAXIMUM over all
               patches covering it
    flag != 0: overlapping patches are summed
    return: list of np.array, one (n_class, h, w) map per image

    BUGFIX (flag == 0 path): the original built a partial max and then
    *added* it to the canvas, which doubled values on overlaps and
    dropped patch values equal to the current canvas value. It now
    assigns np.maximum(canvas, patch) directly, matching the documented
    "take the max" intent.
    '''
    patches = np.array(torch.detach(patches).cpu().numpy())
    predictions = [np.zeros((n_class, size[0], size[1])) for size in sizes]

    for i in range(len(sizes)):
        for j in range(len(coordinates[i])):
            # Fractional coordinates -> absolute pixel offsets.
            top, left = coordinates[i][j]
            top = int(np.round(top * sizes[i][0]))
            left = int(np.round(left * sizes[i][1]))

            region = predictions[i][:, top: top + p_size[0], left: left + p_size[1]]
            if flag == 0:
                predictions[i][:, top: top + p_size[0], left: left + p_size[1]] = \
                    np.maximum(region, patches[j][:, :, :])
            else:
                predictions[i][:, top: top + p_size[0], left: left + p_size[1]] += patches[j][:, :, :]

    return predictions
95
+
96
+
97
if __name__ == '__main__':
    # Smoke test: load one DRIVE training image and split it into
    # 224x224 patches. Requires the relative dataset path to exist.
    images = []

    img = Image.open(os.path.join(r"../train_valid/003DRIVE/image", f"01.png"))
    images.append(img)
    # print(len(images)) = 3
    p_size = (224,224)
    patches, coordinates, templates, sizes, ratios = global2patch(images, p_size)
    # predictions = patch2global(patches, 3, sizes, coordinates, p_size)
    # print(type(predictions))
AV/Tools/utils_test.py ADDED
@@ -0,0 +1,353 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import numpy as np
3
+
4
def paint_border_overlap(img, patch_h, patch_w, stride_h, stride_w):
    """Zero-pad a 3-channel (H, W, 3) image on the bottom/right so that
    (H - patch_h) and (W - patch_w) become exact multiples of the strides.

    Returns the original array unchanged when no padding is needed.
    """
    h, w = img.shape[0], img.shape[1]
    extra_h = (h - patch_h) % stride_h
    extra_w = (w - patch_w) % stride_w
    if extra_h:
        padded = np.zeros((h + (stride_h - extra_h), w, 3))
        padded[:h, :w, :] = img
        img = padded
    if extra_w:
        padded = np.zeros((img.shape[0], w + (stride_w - extra_w), 3))
        padded[:img.shape[0], :w, :] = img
        img = padded
    return img
18
+
19
def paint_border_overlap_trad(img, patch_h, patch_w, stride_h, stride_w):
    """Same as paint_border_overlap but for 2-channel (H, W, 2) maps.

    Zero-pads bottom/right so patch extraction tiles the image exactly.
    """
    h, w = img.shape[0], img.shape[1]
    extra_h = (h - patch_h) % stride_h
    extra_w = (w - patch_w) % stride_w
    if extra_h:
        padded = np.zeros((h + (stride_h - extra_h), w, 2))
        padded[:h, :w, :] = img
        img = padded
    if extra_w:
        padded = np.zeros((img.shape[0], w + (stride_w - extra_w), 2))
        padded[:img.shape[0], :w, :] = img
        img = padded
    return img
33
+
34
def pred_only_FOV_AV(data_imgs1,data_imgs2,data_masks1,data_masks2,original_imgs_border_masks,threshold_confusion):
    # NOTE(review): DEAD CODE — this function is immediately re-defined
    # below with the same name, so this version is never callable. The
    # two differ only in what they pass to inside_FOV_DRIVE_AV: this one
    # passes the *predictions* (data_imgs1/2), the surviving one passes
    # the *ground-truth masks* (data_masks1/2). Consider deleting this
    # definition.
    assert (len(data_imgs1.shape)==4 and len(data_masks1.shape)==4)  #4D arrays
    assert (data_imgs1.shape[0]==data_masks1.shape[0])
    assert (data_imgs1.shape[2]==data_masks1.shape[2])
    assert (data_imgs1.shape[3]==data_masks1.shape[3])
    assert (data_imgs1.shape[1]==1 and data_masks1.shape[1]==1)  #check the channel is 1
    height = data_imgs1.shape[2]
    width = data_imgs1.shape[3]
    new_pred_imgs1 = []
    new_pred_masks1 = []
    new_pred_imgs2 = []
    new_pred_masks2 = []
    for i in range(data_imgs1.shape[0]):  #loop over the full images
        for x in range(width):
            for y in range(height):
                # Keep only pixels inside the FOV whose prediction clears the threshold.
                if inside_FOV_DRIVE_AV(i,x,y,data_imgs1,data_imgs2,original_imgs_border_masks,threshold_confusion)==True:
                    new_pred_imgs1.append(data_imgs1[i,:,y,x])
                    new_pred_masks1.append(data_masks1[i,:,y,x])
                    new_pred_imgs2.append(data_imgs2[i,:,y,x])
                    new_pred_masks2.append(data_masks2[i,:,y,x])
    new_pred_imgs1 = np.asarray(new_pred_imgs1)
    new_pred_masks1 = np.asarray(new_pred_masks1)
    new_pred_imgs2 = np.asarray(new_pred_imgs2)
    new_pred_masks2 = np.asarray(new_pred_masks2)
    return new_pred_imgs1, new_pred_masks1,new_pred_imgs2, new_pred_masks2
59
+
60
def pred_only_FOV_AV(data_imgs1, data_imgs2, data_masks1, data_masks2, original_imgs_border_masks, threshold_confusion):
    """Collect per-pixel predictions and labels for both A/V channels,
    restricted to pixels inside the FOV whose ground-truth mask value
    clears the threshold (see inside_FOV_DRIVE_AV).

    All inputs are 4D (N, 1, H, W) arrays. Returns four flat arrays:
    (preds1, masks1, preds2, masks2).
    """
    assert len(data_imgs1.shape) == 4 and len(data_masks1.shape) == 4  # 4D arrays
    assert data_imgs1.shape[0] == data_masks1.shape[0]
    assert data_imgs1.shape[2] == data_masks1.shape[2]
    assert data_imgs1.shape[3] == data_masks1.shape[3]
    assert data_imgs1.shape[1] == 1 and data_masks1.shape[1] == 1  # single channel

    height = data_imgs1.shape[2]
    width = data_imgs1.shape[3]
    preds1, gts1, preds2, gts2 = [], [], [], []

    for i in range(data_imgs1.shape[0]):
        for x in range(width):
            for y in range(height):
                # Selection is driven by the ground-truth masks here
                # (unlike the shadowed definition above).
                if inside_FOV_DRIVE_AV(i, x, y, data_masks1, data_masks2,
                                       original_imgs_border_masks, threshold_confusion):
                    preds1.append(data_imgs1[i, :, y, x])
                    gts1.append(data_masks1[i, :, y, x])
                    preds2.append(data_imgs2[i, :, y, x])
                    gts2.append(data_masks2[i, :, y, x])

    return (np.asarray(preds1), np.asarray(gts1),
            np.asarray(preds2), np.asarray(gts2))
87
+
88
+
89
+
90
def inside_FOV_DRIVE_AV(i, x, y, data_imgs1, data_imgs2, DRIVE_masks, threshold_confusion):
    """Return True when pixel (y, x) of image i lies inside the FOV mask
    AND at least one of the two input maps exceeds the threshold there.

    Coordinates outside the mask bounds count as outside the FOV (the
    prediction may be larger than the original image).
    """
    assert len(DRIVE_masks.shape) == 4  # 4D arrays
    assert DRIVE_masks.shape[1] == 1  # DRIVE masks are black and white
    # DRIVE_masks = DRIVE_masks/255. #NOOO!! otherwise with float numbers takes forever!!

    if x >= DRIVE_masks.shape[3] or y >= DRIVE_masks.shape[2]:
        return False

    in_fov = DRIVE_masks[i, 0, y, x] > 0  # 0 == black (outside) pixel
    above = (data_imgs1[i, 0, y, x] > threshold_confusion) or \
            (data_imgs2[i, 0, y, x] > threshold_confusion)
    return bool(in_fov and above)
103
+
104
def extract_ordered_overlap_trad(img, patch_h, patch_w, stride_h, stride_w, ratio):
    """Slice a padded (H, W, 2) map into an ordered grid of patches,
    downscaling each patch by 'ratio' with cv2.resize.

    The image must already be padded so the strides tile it exactly.
    Returns (N, patch_h//ratio, patch_w//ratio, 2).
    """
    img_h, img_w = img.shape[0], img.shape[1]
    assert (img_h - patch_h) % stride_h == 0 and (img_w - patch_w) % stride_w == 0
    n_h = (img_h - patch_h) // stride_h + 1
    n_w = (img_w - patch_w) // stride_w + 1
    total = n_h * n_w
    patches = np.empty((total, patch_h // ratio, patch_w // ratio, 2))

    idx = 0
    for h in range(n_h):
        for w in range(n_w):
            window = img[h * stride_h:h * stride_h + patch_h,
                         w * stride_w:w * stride_w + patch_w, :]
            patches[idx] = cv2.resize(window, (patch_h // ratio, patch_w // ratio))
            idx += 1
    assert idx == total
    return patches
119
+
120
def make_pad(Patches, pad, sign='', normalize=True):
    """Place an (h, w, 3) patch into the top-left corner of a 256x256x3
    zero canvas.

    When sign == 'img' and normalize is True, the canvas background is
    set to the ImageNet-normalized value of black before the patch is
    pasted (the patch itself is NOT normalized).
    Returns Patches unchanged when pad <= 0.
    NOTE(review): the canvas size is hard-coded to 256 and 'pad' is only
    used as an on/off switch — confirm against callers.
    """
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    if pad <= 0:
        return Patches
    canvas = np.zeros((256, 256, 3), dtype=np.float32)
    if sign == 'img' and normalize:
        for c in range(3):
            canvas[:, :, c] = (canvas[:, :, c] - mean[c]) / std[c]
    canvas[:Patches.shape[0], :Patches.shape[1], :] = Patches
    return canvas
133
+
134
def extract_ordered_overlap_big(img, patch_h=256, patch_w=256, stride_h=256, stride_w=256):
    """Slice an (H, W, 3) image into an ordered patch grid, and also
    produce a 1.5x-context patch (resized back to patch size) for each.

    Context windows are read from a zero-padded canvas so border patches
    get a black margin. Constant-valued patches (e.g. pure background)
    skip the context lookup. Returns (patches, patches_big).
    """
    img_h, img_w = img.shape[0], img.shape[1]
    big_h = int(patch_h * 1.5)
    big_w = int(patch_w * 1.5)
    assert (img_h - patch_h) % stride_h == 0 and (img_w - patch_w) % stride_w == 0
    n_h = (img_h - patch_h) // stride_h + 1
    n_w = (img_w - patch_w) // stride_w + 1
    total = n_h * n_w
    patches = np.empty((total, patch_h, patch_w, 3))
    patches_big = np.empty((total, patch_h, patch_w, 3))

    # Zero canvas with a (big - patch)/2 margin on every side, so a big
    # window indexed by the small-patch offset is centred on the patch.
    img_big = np.zeros((img_h + (big_h - patch_h), img_w + (big_w - patch_w), 3))
    off_h = (big_h - patch_h) // 2
    off_w = (big_w - patch_w) // 2
    img_big[off_h:off_h + img_h, off_w:off_w + img_w, :] = img

    idx = 0
    for h in range(n_h):
        for w in range(n_w):
            window = img[h * stride_h:h * stride_h + patch_h,
                         w * stride_w:w * stride_w + patch_w, :]
            patches[idx] = window
            if np.unique(window).shape[0] == 1:
                # Uniform patch: extra context adds nothing, keep as-is.
                patches_big[idx] = window
            else:
                context = img_big[h * stride_h:h * stride_h + big_h,
                                  w * stride_w:w * stride_w + big_w, :]
                patches_big[idx] = cv2.resize(context, (patch_h, patch_w))
            idx += 1
    assert idx == total
    return patches, patches_big
165
+
166
def extract_ordered_overlap_big_v2(img, patch_h=256, patch_w=256, stride_h=256, stride_w=256):
    # Variant of extract_ordered_overlap_big: the context window is taken
    # from the image itself (clamped at the borders via a 9-way case
    # split) instead of a zero-padded canvas, then resized to 256x256.
    # Small patches are zero-padded to 256x256 via make_pad.
    # Returns (patches, patches_big), both (N, 256, 256, 3).
    img_h = img.shape[0]  #height of the full image
    img_w = img.shape[1]  #width of the full image
    assert ((img_h-patch_h)%stride_h==0 and (img_w-patch_w)%stride_w==0)
    N_patches_img = ((img_h-patch_h)//stride_h+1)*((img_w-patch_w)//stride_w+1)  #// --> division between integers
    patches = np.empty((N_patches_img, 256, 256, 3))
    patches_big = np.empty((N_patches_img, 256, 256, 3))

    iter_tot = 0  #iter over the total number of patches (N_patches)
    for h in range((img_h-patch_h)//stride_h+1):
        for w in range((img_w-patch_w)//stride_w+1):
            patch = img[h*stride_h:(h*stride_h)+patch_h, w*stride_w:(w*stride_w)+patch_w, :]
            # Zero-pad the plain patch up to 256x256 (no normalization).
            pad = max(0,256-patch_h)
            patch = make_pad(patch,pad,normalize=False)
            #patch = cv2.resize(patch,(256, 256))
            patches[iter_tot]=patch

            # Context window: nominally the patch plus a quarter-patch on
            # each side; at image borders the overhang is shifted inward
            # (half a patch taken from the inner side instead).
            # patch_big = img[max(0,h*stride_h-patch_h//4):min((h*stride_h)+patch_h+patch_h//4,img_h),max(0,w*stride_w-patch_w//4):min((w*stride_w)+patch_w+patch_w//4,img_w), :]
            if h==0 and w==0:
                # top-left corner
                patch_big = img[0:patch_h+patch_h//2, 0:patch_w+patch_w//2, :]

            elif h==0 and w!=0 and w!=((img_w-patch_w)//stride_w):
                # top edge
                patch_big = img[0:0+patch_h+patch_h//2, w*stride_w-patch_w//4:(w*stride_w)+patch_w+patch_w//4, :]
            elif h==0 and w==((img_w-patch_w)//stride_w):
                # top-right corner
                patch_big = img[0:0+patch_h+patch_h//2, (w)*stride_w-patch_w//2:(w*stride_w)+patch_w, :]
            elif h!=0 and h!=((img_h-patch_h)//stride_h) and w==0:
                # left edge
                patch_big = img[h*stride_h-patch_h//4:(h*stride_h)+patch_h+patch_h//4, 0:0+patch_w+patch_w//2, :]

            elif h==((img_h-patch_h)//stride_h) and w==0:
                # bottom-left corner
                patch_big = img[(h)*stride_h-patch_h//2:(h*stride_h)+patch_h, 0:patch_w+patch_w//2, :]
            elif h==((img_h-patch_h)//stride_h) and w!=0 and w!=((img_w-patch_w)//stride_w):
                # bottom edge
                patch_big = img[h*stride_h-patch_h//2:(h*stride_h)+patch_h, w*stride_w-patch_w//4:(w*stride_w)+patch_w+patch_w//4, :]
            elif h==((img_h-patch_h)//stride_h) and w==((img_w-patch_w)//stride_w):
                # bottom-right corner
                patch_big = img[h*stride_h-patch_h//2:(h*stride_h)+patch_h, (w)*stride_w-patch_w//2:(w*stride_w)+patch_w, :]
            elif h!=0 and h!=((img_h-patch_h)//stride_h) and w==((img_w-patch_w)//stride_w):
                # right edge
                patch_big = img[h*stride_h-patch_h//4:(h*stride_h)+patch_h+patch_h//4, (w)*stride_w-patch_w//2:(w*stride_w)+patch_w, :]
            else:
                # interior patch: symmetric quarter-patch context
                patch_big = img[h*stride_h-patch_h//4:(h*stride_h)+patch_h+patch_h//4,w*stride_w-patch_w//4:(w*stride_w)+patch_w+patch_w//4, :]
            # print(patch_big.shape)
            patch_big = cv2.resize(patch_big,(256, 256))

            patches_big[iter_tot]=patch_big

            iter_tot +=1  #total
    assert (iter_tot==N_patches_img)
    return patches,patches_big  #array with all the img divided in patches
212
+
213
+
214
def extract_ordered_overlap_big_v1(img, patch_h, patch_w,stride_h,stride_w):
    # Earlier context-window variant: the big window extends by half a
    # stride (interior) or a full stride (borders) around the patch,
    # again via a 9-way border case split, then is resized to 256x256.
    # Returns (patches, patches_big).
    img_h = img.shape[0]  #height of the full image
    img_w = img.shape[1]  #width of the full image
    assert ((img_h-patch_h)%stride_h==0 and (img_w-patch_w)%stride_w==0)
    N_patches_img = ((img_h-patch_h)//stride_h+1)*((img_w-patch_w)//stride_w+1)  #// --> division between integers
    patches = np.empty((N_patches_img, patch_h, patch_w, 3))
    patches_big = np.empty((N_patches_img, patch_h, patch_w, 3))

    iter_tot = 0  #iter over the total number of patches (N_patches)
    for h in range((img_h-patch_h)//stride_h+1):
        for w in range((img_w-patch_w)//stride_w+1):
            patch = img[h*stride_h:(h*stride_h)+patch_h, w*stride_w:(w*stride_w)+patch_w, :]
            #patch = cv2.resize(patch,(256, 256))
            patches[iter_tot]=patch
            if h==0 and w==0:
                # top-left corner: grow right/down by one stride
                patch_big = img[h*stride_h:(h*stride_h)+patch_h+stride_h, w*stride_w:(w*stride_w)+patch_w+stride_w, :]
            elif h==0 and w!=0 and w!=((img_w-patch_w)//stride_w):
                # top edge: half a stride left/right, one stride down
                patch_big = img[h*stride_h:(h*stride_h)+patch_h+stride_h, int((w-0.5)*stride_w):(w*stride_w)+patch_w+stride_w//2, :]
            elif h==0 and w==((img_w-patch_w)//stride_w):
                # top-right corner
                patch_big = img[h*stride_h:(h*stride_h)+patch_h+stride_h, (w-1)*stride_w:(w*stride_w)+patch_w, :]
            elif h!=0 and h!=((img_h-patch_h)//stride_h) and w==0:
                # left edge
                patch_big = img[int((h-0.5)*stride_h):(h*stride_h)+patch_h+stride_h//2, w*stride_w:(w*stride_w)+patch_w+stride_w, :]

            elif h==((img_h-patch_h)//stride_h) and w==0:
                # bottom-left corner
                patch_big = img[(h-1)*stride_h:(h*stride_h)+patch_h, w*stride_w:(w*stride_w)+patch_w+stride_w, :]
            elif h==((img_h-patch_h)//stride_h) and w!=0 and w!=((img_w-patch_w)//stride_w):
                # bottom edge
                patch_big = img[(h-1)*stride_h:(h*stride_h)+patch_h, int((w-0.5)*stride_w):(w*stride_w)+patch_w+stride_w//2, :]
            elif h==((img_h-patch_h)//stride_h) and w==((img_w-patch_w)//stride_w):
                # bottom-right corner
                patch_big = img[(h-1)*stride_h:(h*stride_h)+patch_h, (w-1)*stride_w:(w*stride_w)+patch_w, :]
            elif h!=0 and h!=((img_h-patch_h)//stride_h) and w==((img_w-patch_w)//stride_w):
                # right edge
                patch_big = img[int((h-0.5)*stride_h):(h*stride_h)+patch_h+stride_h//2, (w-1)*stride_w:(w*stride_w)+patch_w, :]
            else:
                # interior: half a stride of context on every side
                patch_big = img[int((h-0.5)*stride_h):(h*stride_h)+patch_h+stride_h//2, int((w-0.5)*stride_w):(w*stride_w)+patch_w+stride_w//2, :]

            patch_big = cv2.resize(patch_big,(256, 256))

            patches_big[iter_tot]=patch_big

            iter_tot +=1  #total
    assert (iter_tot==N_patches_img)
    return patches,patches_big  #array with all the img divided in patches
255
+
256
+
257
+
258
+
259
+
260
def pred_to_imgs(pred, mode="original"):
    """Convert per-pixel two-class probabilities into prediction images.

    Args:
        pred: (N_patches, H*W, 2) array of class probabilities.
        mode: "original" keeps the foreground probability (channel 1);
              "threshold" binarizes it at 0.5. Any other value prints an
              error and exits (kept for backward compatibility).

    Returns:
        (N_patches, 1, 48, 48) array.
        NOTE(review): the reshape hard-codes 48x48, so H*W must equal
        2304 — confirm against callers.
    """
    assert len(pred.shape) == 3  # 3D array: (Npatches, height*width, 2)
    assert pred.shape[2] == 2  # check the classes are 2
    # Vectorized (was an element-wise Python double loop over every pixel).
    if mode == "original":
        pred_images = np.array(pred[:, :, 1], dtype=np.float64)
    elif mode == "threshold":
        pred_images = np.where(pred[:, :, 1] >= 0.5, 1.0, 0.0)
    else:
        print("mode " + str(mode) + " not recognized, it can be 'original' or 'threshold'")
        exit()
    pred_images = np.reshape(pred_images, (pred_images.shape[0], 1, 48, 48))
    return pred_images
280
+
281
def recompone_overlap(pred_patches, img_h, img_w, stride_h, stride_w):
    """Reassemble an ordered grid of (possibly overlapping) patch
    predictions into one full-size map by averaging overlaps.

    pred_patches: (N, C, patch_h, patch_w), in the same row-major order
    produced by the extract_ordered_overlap_* functions.
    Returns a (C, img_h, img_w) array.
    """
    assert len(pred_patches.shape) == 4  # 4D arrays
    patch_h = pred_patches.shape[2]
    patch_w = pred_patches.shape[3]
    n_h = (img_h - patch_h) // stride_h + 1
    n_w = (img_w - patch_w) // stride_w + 1
    channels = pred_patches.shape[1]

    prob_sum = np.zeros((channels, img_h, img_w))   # sum of probabilities
    hit_count = np.zeros((channels, img_h, img_w))  # patches covering each pixel

    k = 0  # iterator over all the patches
    for h in range(n_h):
        for w in range(n_w):
            top, left = h * stride_h, w * stride_w
            prob_sum[:, top:top + patch_h, left:left + patch_w] += pred_patches[k]
            hit_count[:, top:top + patch_h, left:left + patch_w] += 1
            k += 1

    assert k == pred_patches.shape[0]
    assert np.min(hit_count) >= 1.0  # every pixel covered at least once
    return prob_sum / hit_count
307
+
308
+
309
def Normalize(Patches):
    """Standardize a batch of (N, 3, H, W) patches with ImageNet
    channel statistics, IN PLACE, and return the same array.
    """
    mean = (0.485, 0.456, 0.406)
    std = (0.229, 0.224, 0.225)

    # mean = [0.3261, 0.2287, 0.1592]
    # std = [0.2589, 0.1882, 0.1369]

    for c in range(3):
        Patches[:, c, :, :] = (Patches[:, c, :, :] - mean[c]) / std[c]
    return Patches
321
+
322
def Normalize_patch(Patches):
    """Standardize a single (3, H, W) patch with ImageNet channel
    statistics, IN PLACE, and return the same array.
    """
    mean = (0.485, 0.456, 0.406)
    std = (0.229, 0.224, 0.225)

    for c in range(3):
        Patches[c, :, :] = (Patches[c, :, :] - mean[c]) / std[c]
    return Patches
330
+
331
def sigmoid(x):
    """Numerically stable logistic function 1 / (1 + exp(-x)).

    The previous form np.exp(x) / (1 + np.exp(x)) overflows for large
    positive x (inf / inf -> nan). Rewriting in terms of exp(-|x|)
    keeps the exponential argument non-positive, so it never overflows:
      x >= 0: 1 / (1 + exp(-x));   x < 0: exp(x) / (1 + exp(x)).
    Accepts scalars or arrays (returns a 0-d array for scalar input).
    """
    t = np.exp(-np.abs(x))
    return np.where(np.asarray(x) >= 0, 1.0 / (1.0 + t), t / (1.0 + t))
333
+
334
def inside_FOV_DRIVE(x, y, DRIVE_masks):
    """Return True when pixel (y, x) falls inside the 2D FOV mask.

    Coordinates beyond the mask bounds count as outside (the prediction
    may be larger than the original image). A mask value of 0 (black)
    marks pixels outside the field of view.
    """
    # DRIVE_masks = DRIVE_masks/255. #NOOO!! otherwise with float numbers takes forever!!
    if x >= DRIVE_masks.shape[1] or y >= DRIVE_masks.shape[0]:
        return False
    return bool(DRIVE_masks[y, x] > 0)
343
+
344
+
345
def kill_border(pred_img, border_masks):
    """Zero out every pixel of pred_img that lies outside the FOV.

    Args:
        pred_img: (C, H, W) prediction; modified IN PLACE and returned.
        border_masks: (H', W') FOV mask; 0 marks outside-FOV pixels.
            Prediction pixels beyond the mask bounds are also zeroed.

    Vectorized rewrite of the original per-pixel Python loop (same
    semantics as testing inside_FOV_DRIVE at every coordinate).
    """
    h = pred_img.shape[1]
    w = pred_img.shape[2]
    inside = np.zeros((h, w), dtype=bool)
    # Coordinates past the mask extent stay False (outside the FOV).
    hh = min(h, border_masks.shape[0])
    ww = min(w, border_masks.shape[1])
    inside[:hh, :ww] = border_masks[:hh, :ww] > 0
    pred_img[:, ~inside] = 0.0
    return pred_img
+
AV/Tools/warmup.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+
3
+ import torch
4
+ from torch.optim.lr_scheduler import StepLR, ExponentialLR
5
+
6
class GradualWarmupScheduler(torch.optim.lr_scheduler._LRScheduler):
    """ Gradually warm-up(increasing) learning rate in optimizer.
    Proposed in 'Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour'.

    Args:
        optimizer (Optimizer): Wrapped optimizer.
        multiplier: target learning rate = base lr * multiplier if multiplier > 1.0. if multiplier = 1.0, lr starts from 0 and ends up with the base_lr.
        total_epoch: target learning rate is reached at total_epoch, gradually
        after_scheduler: after target_epoch, use this scheduler(eg. ReduceLROnPlateau)
    """

    def __init__(self, optimizer, multiplier, total_epoch, after_scheduler=None):
        # multiplier >= 1.0 is required: the warm-up ramps up to
        # base_lr * multiplier (or from 0 to base_lr when multiplier == 1.0).
        self.multiplier = multiplier
        if self.multiplier < 1.:
            raise ValueError('multiplier should be greater thant or equal to 1.')
        self.total_epoch = total_epoch
        self.after_scheduler = after_scheduler
        # Flips to True once warm-up hands over to after_scheduler
        # (its base_lrs are rescaled exactly once at that point).
        self.finished = False
        super(GradualWarmupScheduler, self).__init__(optimizer)

    def get_lr(self):
        # Past the warm-up window: delegate to after_scheduler if present
        # (rescaling its base lrs once); otherwise hold base_lr * multiplier.
        if self.last_epoch > self.total_epoch:
            if self.after_scheduler:
                if not self.finished:
                    self.after_scheduler.base_lrs = [base_lr * self.multiplier for base_lr in self.base_lrs]
                    self.finished = True
                return self.after_scheduler.get_last_lr()
            return [base_lr * self.multiplier for base_lr in self.base_lrs]

        if self.multiplier == 1.0:
            # Linear ramp from 0 up to base_lr over total_epoch steps.
            return [base_lr * (float(self.last_epoch) / self.total_epoch) for base_lr in self.base_lrs]
        else:
            # Linear ramp from base_lr up to base_lr * multiplier.
            return [base_lr * ((self.multiplier - 1.) * self.last_epoch / self.total_epoch + 1.) for base_lr in self.base_lrs]


    def step(self, epoch=None, metrics=None):
        # NOTE(review): both branches below call after_scheduler.step()
        # without arguments — 'epoch' and 'metrics' are never forwarded.
        # Confirm this is intentional for the after_scheduler in use
        # (e.g. ReduceLROnPlateau would need 'metrics').
        if self.finished and self.after_scheduler:
            if epoch is None:
                self.after_scheduler.step()
            else:
                self.after_scheduler.step()
            self._last_lr = self.after_scheduler.get_last_lr()
        else:
            # NOTE(review): passing 'epoch' to _LRScheduler.step() is
            # deprecated in recent PyTorch — confirm the installed
            # version still accepts it.
            return super(GradualWarmupScheduler, self).step(epoch)
51
+
52
+
53
if __name__ == '__main__':
    # Smoke test: warm up over 10 epochs towards 2x the base LR, then
    # hand over to StepLR; prints the LR at each epoch.
    model = [torch.nn.Parameter(torch.randn(2, 2, requires_grad=True))]
    optim = torch.optim.Adam(model, 0.0002)

    # scheduler_warmup is chained with schduler_steplr
    scheduler_steplr = StepLR(optim, step_size=80, gamma=0.1)
    scheduler_warmup = GradualWarmupScheduler(optim, multiplier=2, total_epoch=10, after_scheduler=scheduler_steplr)

    # this zero gradient update is needed to avoid a warning message, issue #8.
    optim.zero_grad()
    optim.step()

    for epoch in range(1, 20):
        scheduler_warmup.step(epoch)
        print(epoch, optim.param_groups[0]['lr'])

        optim.step()
Binary file (1.88 kB). View file
 
AV/config/__pycache__/config_test_general.cpython-39.pyc ADDED
Binary file (2.28 kB). View file
 
AV/config/__pycache__/config_train_general.cpython-310.pyc ADDED
Binary file (1.9 kB). View file
 
AV/config/__pycache__/config_train_general.cpython-39.pyc ADDED
Binary file (2.72 kB). View file
 
AV/config/config_test_general.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Test-time configuration for the AV segmentation model.

Module-level constants only; dataset-dependent values (patch sizes,
strides, pretrained-model paths) are filled in by ``set_dataset``.
"""
import torch
import os

# Check GPU availability
use_cuda = torch.cuda.is_available()
gpu_ids = [0] if use_cuda else []
device = torch.device('cuda' if use_cuda else 'cpu')

# Active dataset tag; may be overridden later via set_dataset().
dataset_name = 'all' # DRIVE
#dataset_name = 'LES' # LES
# dataset_name = 'hrf' # HRF
# dataset_name = 'ukbb' # UKBB
# dataset_name = 'all'
dataset = dataset_name
max_step = 30000 # 30000 for ukbb

batch_size = 8 # default: 4
print_iter = 100 # default: 100
display_iter = 100 # default: 100
save_iter = 5000 # default: 5000
first_display_metric_iter = max_step - save_iter # default: 25000
lr = 0.0002 # if dataset_name!='LES' else 0.00005 # default: 0.0002
step_size = 7000 # 7000 for DRIVE
lr_decay_gamma = 0.5 # default: 0.5
use_SGD = False # default:False

# Discriminator / generator architecture settings.
input_nc = 3
ndf = 32
netD_type = 'basic'
n_layers_D = 5
norm = 'instance'
no_lsgan = False
init_type = 'normal'
init_gain = 0.02
# LSGAN disables the sigmoid output; use_sigmoid mirrors no_lsgan.
use_sigmoid = no_lsgan
use_noise_input_D = False
use_dropout_D = False
# torch.cuda.set_device(gpu_ids[0])
use_GAN = True # default: True

# adam
beta1 = 0.5

# settings for GAN loss
num_classes_D = 1
lambda_GAN_D = 0.01
lambda_GAN_G = 0.01
lambda_GAN_gp = 100
lambda_BCE = 5
lambda_DICE = 5

# Discriminator input: image channels plus 3 prediction channels.
input_nc_D = input_nc + 3

# settings for centerness
use_centerness = True # default: True
lambda_centerness = 1
center_loss_type = 'centerness'
centerness_map_size = [128, 128]

# pretrained model
use_pretrained_G = True
use_pretrained_D = False
# model_path_pretrained_G = './log/patch_pretrain'
# Placeholders below are populated per-dataset by set_dataset().
model_path_pretrained_G = ''
model_step_pretrained_G = 0
stride_height = 0
stride_width = 0
patch_size_list=[]
69
+
70
def set_dataset(name):
    """Select the active dataset and update the dependent module globals.

    Sets the pretrained generator checkpoint (path + step), the multi-scale
    patch sizes, and the sliding-window strides for the given dataset name.
    Unknown names fall back to the combined 'all' configuration.
    """
    global dataset_name, model_path_pretrained_G, model_step_pretrained_G
    global stride_height, stride_width, patch_size, patch_size_list, dataset
    dataset_name = name
    dataset = name

    # Pretrained generator checkpoint (directory, training step) per dataset.
    checkpoints = {
        'DRIVE': ('./AV/log/DRIVE-2023_10_20_08_36_50(6500)', 6500),
        'LES': ('./AV/log/LES-2023_09_28_14_04_06(0)', 0),
        'hrf': ('./AV/log/HRF-2023_10_19_11_07_31(1500)', 1500),
        'ukbb': ('./AV/log/UKBB-2023_11_02_23_22_07(5000)', 5000),
    }
    default_ckpt = ('./AV/log/ALL-2024_09_06_09_17_18(9000)', 9000)
    model_path_pretrained_G, model_step_pretrained_G = checkpoints.get(name, default_ckpt)

    # Multi-scale patch sizes; the third entry is the working patch size.
    patch_sizes = {
        'DRIVE': [64, 128, 256],
        'LES': [96, 384, 256],
        'hrf': [64, 384, 256],
        'ukbb': [96, 384, 256],
    }
    patch_size_list = patch_sizes.get(name, [96, 384, 512])
    patch_size = patch_size_list[2]

    # path for dataset: small-image datasets use a tighter stride.
    if name in ('DRIVE', 'LES', 'hrf'):
        stride_height = 50
        stride_width = 50
    else:
        stride_height = 150
        stride_width = 150
109
+
110
# Output classes: artery, vein, vessel.
n_classes = 3

model_step = 0

# use CAM
use_CAM = False

# use resize: resize inputs to a fixed size before inference.
use_resize = True
resize_w_h = (1920,512)

# use av_cross
use_av_cross = False

use_high_semantic = False
lambda_high = 1 # A,V,Vessel

# use global semantic
use_global_semantic = False
# No warmup needed when starting from a pretrained generator.
global_warmup_step = 0 if use_pretrained_G else 5000
130
+
131
+
132
+
133
+
134
+
AV/config/config_train_general.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import os
3
+
4
# Check GPU availability
use_cuda = torch.cuda.is_available()
gpu_ids = [0] if use_cuda else []
device = torch.device('cuda' if use_cuda else 'cpu')


# Active training dataset; pick exactly one of the options below.
dataset_name = 'DRIVE' # DRIVE
#dataset_name = 'LES' # LES
#dataset_name = 'hrf' # HRF
dataset = dataset_name

max_step = 30000 # 30000 for ukbb
# Multi-scale patch sizes per dataset; the third entry is the working size.
if dataset_name=='DRIVE':
    patch_size_list = [64, 128, 256]
elif dataset_name=='LES':
    patch_size_list = [96,384, 256]
elif dataset_name=='hrf':
    patch_size_list = [64, 384, 256]
else:
    # Fail fast with a clear message instead of the confusing NameError the
    # next line would otherwise raise for an unsupported dataset_name.
    raise ValueError(f"Unsupported dataset_name: {dataset_name!r}")
patch_size = patch_size_list[2]
batch_size = 8 # default: 4
print_iter = 100 # default: 100
display_iter = 100 # default: 100
save_iter = 5000 # default: 5000
first_display_metric_iter = max_step-save_iter # default: 25000
lr = 0.0002 #if dataset_name!='LES' else 0.00005 # default: 0.0002
step_size = 7000 # 7000 for DRIVE
lr_decay_gamma = 0.5 # default: 0.5
use_SGD = False # default:False
32
+
33
# Discriminator / generator architecture settings.
input_nc = 3
ndf = 32
netD_type = 'basic'
n_layers_D = 5
norm = 'instance'
no_lsgan = False
init_type = 'normal'
init_gain = 0.02
# LSGAN disables the sigmoid output; use_sigmoid mirrors no_lsgan.
use_sigmoid = no_lsgan
use_noise_input_D = False
use_dropout_D = False

# torch.cuda.set_device(gpu_ids[0])
use_GAN = True # default: True

# adam
beta1 = 0.5

# settings for GAN loss
num_classes_D = 1
lambda_GAN_D = 0.01
lambda_GAN_G = 0.01
lambda_GAN_gp = 100
lambda_BCE = 5
lambda_DICE = 5

# Discriminator input: image channels plus 3 prediction channels.
input_nc_D = input_nc + 3

# settings for centerness
use_centerness =True # default: True
lambda_centerness = 1
center_loss_type = 'centerness'
centerness_map_size = [128,128]

# pretrained model
use_pretrained_G = True
use_pretrained_D = False

model_path_pretrained_G = r"../RIP/weight"

# NOTE(review): a string tag here, whereas the test config uses integer
# steps — confirm downstream checkpoint loading accepts both forms.
model_step_pretrained_G = 'best_drive'


# path for dataset
stride_height = 50
stride_width = 50


# Output classes: artery, vein, vessel.
n_classes = 3

model_step = 0

# use CAM
use_CAM = False

#use resize
use_resize = False
resize_w_h = (256,256)

#use av_cross
use_av_cross = False

use_high_semantic = False
lambda_high = 1 # A,V,Vessel

# use global semantic
use_global_semantic = True
# No warmup needed when starting from a pretrained generator.
global_warmup_step = 0 if use_pretrained_G else 5000

# use network
use_network = 'convnext_tiny' # swin_t,convnext_tiny

# Training-data roots per dataset; KeyError here means dataset_name has no
# registered training path.
dataset_path = {'DRIVE': './data/AV_DRIVE/training/',

                'hrf': './data/hrf/training/',

                'LES': './data/LES_AV/training/',

                }
trainset_path = dataset_path[dataset_name]


print("Dataset:")
print(trainset_path)
print(use_network)
117
+ print(use_network)
118
+
119
+
120
+
121
+
AV/log/ALL-2024_09_06_09_17_18(9000)/G_9000.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:15bff178940810286c7df89a4ac416a167a3f2a14c015ce3bf3a4c65b082ae49
3
+ size 115842242
AV/log/DRIVE-2023_10_20_08_36_50(6500)/G_6500.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6c2f29a05c014e372e9f3421b34df62cf3a557b9f4834da27e37c555a4f465d5
3
+ size 115848956
AV/log/HRF-2023_10_19_11_07_31(1500)/G_1500.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:325ce3ee74ecd9fb1c554fe810f70e471595e2f521b5fb094a34a682846efb7d
3
+ size 115848956
AV/log/LES-2023_09_28_14_04_06(0)/G_0.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:21d2261e124773dfe15113c2bb9c4e16c59988c99613cba68e0209a69be7a770
3
+ size 115830579