Medha Sawhney
committed on
Commit 34a8bb0
1 Parent(s): cc2adeb

uploading MEMTrack codebase
- .gitignore +5 -0
- MEMTrack/.gitignore +13 -0
- MEMTrack/LICENSE +21 -0
- MEMTrack/README.md +228 -0
- MEMTrack/debug.md +19 -0
- MEMTrack/requirements.txt +34 -0
- MEMTrack/scripts/inference.sh +28 -0
- MEMTrack/scripts/run.sh +14 -0
- MEMTrack/scripts/test_set_eval.sh +19 -0
- MEMTrack/scripts/tracking.sh +20 -0
- MEMTrack/scripts/train.sh +27 -0
- MEMTrack/src/GenerateTrackingData.py +248 -0
- MEMTrack/src/GenerateVideo.py +48 -0
- MEMTrack/src/LossEvalHook.py +70 -0
- MEMTrack/src/Tracking.py +114 -0
- MEMTrack/src/TrackingAnalysis.py +464 -0
- MEMTrack/src/__pycache__/GenerateTrackingData.cpython-38.pyc +0 -0
- MEMTrack/src/__pycache__/GenerateVideo.cpython-38.pyc +0 -0
- MEMTrack/src/__pycache__/Tracking.cpython-38.pyc +0 -0
- MEMTrack/src/__pycache__/TrackingAnalysis.cpython-38.pyc +0 -0
- MEMTrack/src/__pycache__/data_feature_gen.cpython-38.pyc +0 -0
- MEMTrack/src/__pycache__/data_prep_utils.cpython-38.pyc +0 -0
- MEMTrack/src/__pycache__/inferenceBacteriaRetinanet_Motility_v2.cpython-38.pyc +0 -0
- MEMTrack/src/__pycache__/sort.cpython-38.pyc +0 -0
- MEMTrack/src/data_feature_gen.py +628 -0
- MEMTrack/src/data_prep_utils.py +531 -0
- MEMTrack/src/evaluation_step_wise_motility.py +755 -0
- MEMTrack/src/evaluation_step_wise_trackmate.py +401 -0
- MEMTrack/src/inferenceBacteriaRetinanet_Motility.py +338 -0
- MEMTrack/src/inferenceBacteriaRetinanet_Motility_v2.py +230 -0
- MEMTrack/src/sort.py +332 -0
- MEMTrack/src/trainBacteriaRetinanetMotionData_Motility.py +318 -0
- MEMTrack/src/trainBacteriaRetinanetMotionData_Motility_Val_loss.py +333 -0
- MEMTrack/todo.txt +7 -0
- app.py +153 -171
- requirements.txt +35 -1
.gitignore
CHANGED
@@ -6,3 +6,8 @@ flagged/
 frames/
 diff_frames/
 *.wmv
+data/
+raw_data/
+DataFeatures/
+app_trial.py
+models*
MEMTrack/.gitignore
ADDED
@@ -0,0 +1,13 @@
#ignore models
models/*

#zip files
*.zip

#ignore data
data/*
!data/sample_videomap.txt
/DataFeatures/*

#ipynb checkpoints
*/.ipynb_checkpoints
MEMTrack/LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2023 Medha Sawhney

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
MEMTrack/README.md
ADDED
@@ -0,0 +1,228 @@
[](https://opensource.org/licenses/MIT)    [](https://colab.research.google.com/drive/1PltZ8q_AEmVxoSoh5gog288GcPfOgvZy?usp=sharing) [](https://arxiv.org/abs/2310.09441)

# MEMTrack
Deep learning-based automated detection and tracking of bacteria in complex environments such as collagen.

## Project Description
Tracking microrobots is a challenging task, considering their minute size and high speed. As the field progresses towards developing microrobots for biomedical applications and studying them in physiologically relevant or in vivo environments, this challenge is exacerbated by the dense surrounding environments with feature size and shape comparable to those of microrobots. To address this challenge, we developed the Motion Enhanced Multi-level Tracker (MEMTrack), a robust pipeline for detecting and tracking micro-motors in bright-field microscopy videos using synthetic motion features, deep learning-based object detection, and a modified Simple Online and Real-time Tracking (SORT) algorithm with interpolation. Our object detection approach combines different models based on the object's motion pattern. We trained and validated our model using bacterial micro-motors in a tissue-like collagen environment and tested it in collagen and liquid (aqueous) media. We demonstrate that MEMTrack can accurately predict and track even the most challenging bacterial micro-motors missed by skilled human annotators, achieving precision and recall of 77% and 48% in collagen and 94% and 35% in liquid media, respectively. We also show that MEMTrack is able to accurately quantitate the average speed of bacterial micro-motors with no statistically significant difference from the laboriously produced manual tracking data. Our proposed pipeline not only represents a significant contribution to the field of microrobot image analysis and tracking using computer vision but also opens the potential of applying deep learning methods in vision-based control of microrobots for various applications, including disease diagnosis and treatment.

**Paper Link: https://onlinelibrary.wiley.com/doi/full/10.1002/aisy.202300590**

**Paper arXiv Link: https://arxiv.org/abs/2310.09441** (for updated results and information)

**Google Colab Tutorial: https://colab.research.google.com/drive/1PltZ8q_AEmVxoSoh5gog288GcPfOgvZy?usp=sharing**

## Getting Started
### Installation
To get started with this project, follow these steps:

1. Clone the repository to your local machine using Git:

```bash
git clone https://github.com/sawhney-medha/MEMTrack.git
```

2. Navigate to the project directory:

```bash
cd MEMTrack
```

3. Create an environment and install the required dependencies using pip:

```bash
python3.8 -m venv memtrack_env
source memtrack_env/bin/activate
cd MEMTrack
pip install -r requirements.txt
python -m pip install 'git+https://github.com/facebookresearch/detectron2.git'
```

4. Download models.zip containing the pre-trained models and store it in *MEMTrack/models*.

**[Download Pretrained Models](https://drive.google.com/file/d/1agsLD5HV_VmDNpDhjHXTCAVmGUm2IQ6p/view?usp=sharing)**

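Before moving on, a quick import check can confirm the environment is usable. This snippet is not part of the repository; it only assumes the packages installed above are available inside the virtual environment:

```python
# Minimal sketch: verify the core dependencies inside memtrack_env.
import torch, detectron2, cv2

print("torch:", torch.__version__, "| CUDA available:", torch.cuda.is_available())
print("detectron2:", detectron2.__version__, "| OpenCV:", cv2.__version__)
```
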
### Reproducing results on sample test data
- **[Download Sample Data](https://drive.google.com/file/d/1YHgT--xyMrQYeyaBYiIxIfLdTu2nW7nQ/view?usp=sharing)**
- Run the data loading ([Adding Data](#adding-data) and [Preprocessing Data](#preprocessing-data)) and [Feature Generation](#data-usage) scripts for the sample data.
- Follow the [Inference](#inference-from-trained-object-detector-model), [Tracking](#tracking) and [Evaluation](#evaluation-and-analysis) instructions below.
- The */src/Automated-ConfScoreThresholds.ipynb* notebook contains the code for generating confidence score thresholds for the trained models on the validation set.
- The *src/CollagenPRPlot.ipynb* notebook generates the results on our test set and creates the plot to visualize them.

### Inference on custom data using pre-trained models
- Generate data and store it in the same format as described in [Adding Data](#adding-data).
- Run the data loading ([Adding Data](#adding-data) and [Preprocessing Data](#preprocessing-data)) and [Feature Generation](#data-usage) scripts for the custom data.
- Ensure the custom data is in the described format; otherwise, write your own code to produce the same output format as the preprocessing code.
- Since inference test data will not have a Raw Data.csv, the code will automatically load the data assuming no ground truth annotations are present.
- Run the inference script and follow the instructions in [Inference from Trained Object Detector Model](#inference-from-trained-object-detector-model). The *coco_instances* JSON file will contain a list of all predicted bounding boxes for every frame.
- Run the tracking scripts to generate tracklets.

### Evaluation on custom data using pre-trained models
- Follow the same instructions as for inference, but provide an updated RawData.csv with ground truth annotations.
- For evaluation, follow the instructions in [Evaluation and Analysis](#evaluation-and-analysis).

## Data Preparation
### Adding Data

1. **Data Collection**
   - We recorded bacteria (i.e., the micromotor in bacteria-based biohybrid microrobots) swimming behavior in collagen, as a tissue surrogate, and in an aqueous environment. To generate training, validation, and test datasets for MEMTrack, the microscopy videos acquired in collagen and in aqueous media were imported into the ImageJ software. The MTrackJ plugin was used to manually label all bacteria in each frame of each video, and their x and y coordinates were recorded.
   - Any data can be used as long as a list of frames along with their respective x and y coordinates is available.
   - To evaluate the results, track IDs are also required for the annotated bacteria.
   - To train different models for different motilities, motility sub-population labels are also required. If these are not available, a single model can be trained for all annotated bacteria.

2. **Organize Data Directory:**
   - The final data should be packed in a zip file with the following structure:
   ```
   ├── video_zip_file/
   │   ├── Images without Labels/
   │   │   ├── sample_name1.jpg
   │   │   ├── sample_name2.jpg
   │   │   └── ...
   │   ├── Raw Data.csv
   ```
   **[Download Sample Data](https://drive.google.com/file/d/1YHgT--xyMrQYeyaBYiIxIfLdTu2nW7nQ/view?usp=sharing)**

3. **Data Format:**
   - RawData.csv should have the following columns (a quick validation sketch is shown after this list):
     - "TID" --> track ID
     - "PID" --> picture ID / frame number
     - "x [pixel]" --> x coordinate
     - "y [pixel]" --> y coordinate
     - "subpopulation" --> "N"/"L"/"M"/"H" for the motility subpopulations (non-motile / low motility / mid motility / high motility)

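As a quick sanity check before running the preprocessing notebook, a short pandas snippet can verify that an annotation file matches the column layout above. This is only an illustration; the file name and location are assumptions, not repository code:

```python
# Sketch: validate the columns of an annotation CSV before preprocessing.
import pandas as pd

required = ["TID", "PID", "x [pixel]", "y [pixel]", "subpopulation"]
df = pd.read_csv("Raw Data.csv")  # hypothetical path inside the unzipped video folder
missing = [c for c in required if c not in df.columns]
if missing:
    raise ValueError(f"Missing columns: {missing}")
print(df["subpopulation"].value_counts())  # expect labels among N/L/M/H
```
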
### Preprocessing Data

4. **Preprocessing Code:**
   - Run the DataPreparation_Motility.ipynb notebook located in the *MEMTrack/src* directory and update the path variables (Cell 3) according to your directory structure.
   - The code needs to be run for every video that is to be loaded into *data/*.
   - The videomap.txt file is generated automatically by the preprocessing code.

5. **Preprocessed Data Directory:**
   - Expected directory structure after running the preprocessing code (a quick structure check is sketched after this block):

   ```
   ├── MEMTrack/
   │   ├── data/
   │   │   ├── videomap.txt
   │   │   ├── collagen/
   │   │   │   ├── video1/
   │   │   │   │   ├── frame1/
   │   │   │   │   ├── annotations_motility_no/
   │   │   │   │   │   ├── 0.txt
   │   │   │   │   │   ├── 1.txt
   │   │   │   │   │   └── ...
   │   │   │   │   ├── annotations_motility_low/
   │   │   │   │   ├── annotations_motility_mid/
   │   │   │   │   ├── annotations_motility_high/
   │   │   │   │   ├── bacteria/
   │   │   │   │   │   ├── 1/
   │   │   │   │   │   │   ├── xy_coord/
   │   │   │   │   │   │   │   ├── 0.txt
   │   │   │   │   │   │   │   ├── 1.txt
   │   │   │   │   │   │   │   └── ...
   │   │   │   │   │   ├── 2/
   │   │   │   │   │   └── ...
   │   │   │   │   ├── images/
   │   │   │   │   │   ├── 0.tif
   │   │   │   │   │   ├── 1.tif
   │   │   │   │   │   └── ...
   │   │   │   ├── video2/
   │   │   │   └── ...
   │   │   └── ...
   │   ├── src/
   ```

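The short sketch below checks that a preprocessed video folder contains the expected sub-directories. The path is illustrative only; adjust it to your own layout:

```python
# Sketch: verify that a preprocessed video folder has the expected sub-directories.
import os

video_dir = "data/collagen/video1"  # hypothetical example path
expected = ["annotations_motility_no", "annotations_motility_low",
            "annotations_motility_mid", "annotations_motility_high",
            "bacteria", "images"]
missing = [d for d in expected if not os.path.isdir(os.path.join(video_dir, d))]
print("missing sub-directories:", missing or "none")
```
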
### Data Usage

6. **Feature Generation Code:**
   - Run the DataFeaturePreparation.ipynb notebook located in the *MEMTrack/src* directory and update the path variables (Cell 3) according to your directory structure.
   - Also update the path variables and choose the feature generation method at the end of the notebook (Cells 18 and 19 for training data, Cell 20 for test/inference) to generate and store the features used for training.
   - The notebook provides multiple ways to generate features; the method recommended based on experiments on collagen data is *"optical_flow_median_back"*. It generates 3 channels for each frame: 1. the original frame, 2. the consecutive optical flow vector, and 3. the difference from the median background (a rough sketch of this idea is shown after this section).
   - Similarly, *"optical flow median back"* with *"optical_flow_prior"=x* computes the optical flow from the xth previous frame, and *"diff_from_max_absolute_consecutive_frame_diff"* creates a feature for the difference from the "max consecutive frame diff" feature, with a frame-diff prior for the xth frame diff.
   - The train/test/val split can be provided in this code as a dict of the video numbers that have been loaded; their feature sets will be generated accordingly.

7. **Final Data Directory:**
   - Expected directory structure after running the feature generation code:

   ```
   ├── MEMTrack/
   │   ├── data/
   │   ├── data_features/
   │   │   ├── exp_name/
   │   │   │   ├── data_features_set/
   │   │   │   │   ├── train/
   │   │   │   │   │   ├── annotations_motility_no/
   │   │   │   │   │   │   ├── 0.txt
   │   │   │   │   │   │   ├── 1.txt
   │   │   │   │   │   │   └── ...
   │   │   │   │   │   ├── annotations_motility_low/
   │   │   │   │   │   ├── annotations_motility_mid/
   │   │   │   │   │   ├── annotations_motility_high/
   │   │   │   │   │   ├── images/
   │   │   │   │   │   │   ├── 0.tif
   │   │   │   │   │   │   ├── 1.tif
   │   │   │   │   │   │   └── ...
   │   │   │   │   │   ├── images_feature/
   │   │   │   │   │   │   ├── 0.tif
   │   │   │   │   │   │   ├── 1.tif
   │   │   │   │   │   │   └── ...
   │   │   │   │   ├── test/
   │   │   │   │   ├── val/
   │   │   │   ├── data_feature_video1/test/
   │   │   │   ├── data_feature_video2/test/
   │   │   │   └── ...
   │   │   └── ...
   │   ├── src/
   ```

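The following is a rough sketch of the *"optical_flow_median_back"* idea described above: stack the original frame, the consecutive optical-flow magnitude, and the difference from the median background into a single 3-channel feature image. This is not the repository code; the parameter choices below (2 pyramid levels, 18-pixel window) only mirror the naming used in the feature directory, and the repository's own implementation in *src/data_feature_gen.py* is the authoritative version:

```python
# Sketch of the "optical_flow_median_back" feature, assuming `frames` is a list of BGR images.
import cv2
import numpy as np

def make_feature_frame(frames, idx):
    """Stack original frame, consecutive optical-flow magnitude, and
    difference from the median background into one 3-channel image."""
    gray = [cv2.cvtColor(f, cv2.COLOR_BGR2GRAY) for f in frames]
    median_bkg = np.median(np.stack(gray), axis=0).astype(np.uint8)

    prev_idx = max(idx - 1, 0)  # an "optical_flow_prior" of x would change this offset
    flow = cv2.calcOpticalFlowFarneback(gray[prev_idx], gray[idx], None,
                                        0.5, 2, 18, 3, 5, 1.2, 0)
    flow_mag, _ = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    flow_mag = cv2.normalize(flow_mag, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)

    diff_bkg = cv2.absdiff(gray[idx], median_bkg)
    return np.dstack([gray[idx], flow_mag, diff_bkg])
```
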
*The following sections describe the training, inference, tracking, and evaluation procedures. The codebase is built using Python, PyTorch, and Detectron2.*

## Training Object Detector Model
- Run the training script */scripts/train.sh* from the *MEMTrack/* root directory.
- Update the *exp_name*, *data_path* (feature directory) and *output_dir* paths as appropriate.
- The training parameters such as learning rate, epochs, etc. can be updated from the bash script.
- There are two Python scripts for training: *src/trainBacteriaRetinanetMotionData_Motility.py* and *src/trainBacteriaRetinanetMotionData_Motility_Val_loss.py*. The only difference is that the latter also generates validation loss plots during training (which Detectron2 does not produce automatically); these can be visualized in the *src/Train_Val_Loss_Curve.ipynb* notebook. The code for it is adapted from https://gist.github.com/ortegatron/c0dad15e49c2b74de8bb09a5615d9f6b
- The training script saves regular checkpoints along with the final model in the *output_dir* specified in the bash script.
- After training, save the trained model checkpoint in */src/models/*.

```bash
bash scripts/train.sh
```

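For orientation, the sketch below shows how a validation-loss hook of the kind provided in *src/LossEvalHook.py* is typically registered in a Detectron2 trainer (conceptually following the gist linked above). It is a sketch only; the repository's *_Val_loss* training script is the authoritative wiring:

```python
# Sketch: registering a validation-loss hook in a Detectron2 DefaultTrainer.
from detectron2.engine import DefaultTrainer
from detectron2.data import DatasetMapper, build_detection_test_loader
from LossEvalHook import LossEvalHook  # src/LossEvalHook.py in this repo

class TrainerWithValLoss(DefaultTrainer):
    def build_hooks(self):
        hooks = super().build_hooks()
        # insert before the writer hook so validation_loss is logged with the other metrics
        hooks.insert(-1, LossEvalHook(
            self.cfg.TEST.EVAL_PERIOD,
            self.model,
            build_detection_test_loader(self.cfg, self.cfg.DATASETS.TEST[0],
                                        DatasetMapper(self.cfg, True)),
        ))
        return hooks
```
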
## Inference from Trained Object Detector Model
- Run the inference script */scripts/inference.sh* from the *MEMTrack/* root directory.
- Update the *exp_name*, *data_path* (feature directory) and *model_dir* (directory with trained models) paths as appropriate.
- inference.sh calls the */src/inferenceBacteriaRetinanet_Motility_v2.py* script, whose parameters can be updated in the bash script. The output from inference is a JSON file containing the object predictions, saved in the model's output_dir.

```bash
bash scripts/inference.sh
```

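The prediction JSON can be inspected directly with a few lines of Python, for example to count detections per frame. The path below is a placeholder; use the file written to your model's output_dir:

```python
# Sketch: inspect the per-frame predictions written by the inference step.
import json
from collections import Counter

with open("path/to/output_dir/coco_instances_results_Motility-low.json") as f:  # hypothetical path
    preds = json.load(f)
per_frame = Counter(p["image_id"] for p in preds)
print("frames with detections:", len(per_frame), "| total boxes:", sum(per_frame.values()))
```
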
## Tracking
- Run the tracking script */scripts/tracking.sh* from the *MEMTrack/* root directory.
- The tracking script converts the object detector predictions into the format expected by the SORT algorithm and then runs tracking.
- The script also calls a script to generate output videos from the tracked data.
- The parameters for tracking-data generation, such as the confidence thresholds, and the tracking parameters, such as min_age, can be updated in the bash script.

```bash
bash scripts/tracking.sh
```

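Conceptually, *src/Tracking.py* performs a per-frame update of the repository's modified SORT tracker (*src/sort.py*): each detection row is `[x1, y1, x2, y2, score]`, and each returned row appends a track ID. The sketch below uses made-up boxes purely to illustrate the call pattern:

```python
# Sketch of the per-frame SORT update used for tracking.
import numpy as np
from sort import Sort  # src/sort.py (modified SORT with interpolation)

tracker = Sort(max_age=25, min_hits=0, iou_threshold=0.05, max_interpolation=25)
detections = np.array([[10, 12, 24, 28, 0.97],   # illustrative boxes for one frame
                       [55, 60, 70, 78, 0.99]])
tracked = tracker.update(detections)             # rows: [x1, y1, x2, y2, track_id]
print(tracked)
```
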
## Evaluation and Analysis
- Run the evaluation script */scripts/test_set_eval.sh* from the *MEMTrack/* root directory.
- The script automatically generates a *test_results_<step>.txt* file for every step of the MEMTrack pipeline and stores the true positives, false positives, and false negatives used to generate results on the test sets.

```bash
bash scripts/test_set_eval.sh
```
- Alternatively, call */scripts/run.sh* to automate the pipeline from inference through evaluation.
- Finally, run the *src/CombinedTestSet-StepWiseEvaluation.ipynb* notebook to generate precision and recall values for an entire test set, along with plots to visualize the results.

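For reference, the stored counts map to precision and recall in the usual way; the sketch below uses placeholder counts, not real results:

```python
# Sketch: turning stored TP/FP/FN counts into precision and recall (placeholder values).
tp, fp, fn = 770, 230, 834
precision = tp / (tp + fp)
recall = tp / (tp + fn)
print(f"precision={precision:.2f}, recall={recall:.2f}")
```
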
## Debugging
- All known issues are consistently updated in *debug.md*. Please refer to it before raising an issue.

## License
This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.

## Acknowledgement
This research was supported in part by NSF grants CBET-2133739 and CBET-1454226 and a 4-VA grant to [Dr. Bahareh Behkam](https://me.vt.edu/people/faculty/behkam-bahareh.html), and NSF grant IIS-2107332 to [Dr. Anuj Karpatne](https://people.cs.vt.edu/karpatne/). Access to computing resources was provided by the Advanced Research Computing (ARC) Center at Virginia Tech.
MEMTrack/debug.md
ADDED
@@ -0,0 +1,19 @@
# Debugging Known Issues

1. **Error with OpenCV functions**
   OpenCV(4.5.1) C:\Users\appveyor\AppData\Local\Temp\1\pip-req-build-1drr4hl0\opencv\modules\highgui\src\window.cpp:651: error: (-2:Unspecified error) The function is not implemented. Rebuild the library with Windows, GTK+ 2.x or Cocoa support.

   ```bash
   pip uninstall opencv-python-headless -y
   pip install opencv-python --upgrade
   ```

2. **Numpy error**
   ImportError: lap requires numpy, please "pip install numpy"

   ```bash
   pip install numpy
   ```

3. **Installing pycocotools**
   If the GitHub link fails:
   ```bash
   pip install "pycocotools>=2.0.2"
   ```
MEMTrack/requirements.txt
ADDED
@@ -0,0 +1,34 @@
pip==22.0.3
numpy
filterpy==1.4.5
json5==0.9.6
jsonlines==3.0.0
jsonpatch==1.32
jsonpointer==2.1
jsonschema==3.2.0
jupyter-client==7.0.3
jupyter-core==4.8.1
jupyter-server==1.11.0
jupyterlab==3.1.13
jupyterlab-pygments==0.1.2
jupyterlab-server==2.8.1
jupyterlab-widgets==1.0.2
lap==0.4.0
matplotlib==3.3.4
motmetrics==1.2.5
natsort==7.1.1
numpy==1.19.5
opencv-python==4.5.5.64
opencv-python-headless==4.5.4.60
openpyxl
pandas==1.1.5
plotly==5.11.0
scikit-image==0.17.2
scikit-learn==0.24.2
scipy==1.5.4
seaborn==0.11.2
torch==1.9.1
torchfile==0.1.0
torchmetrics==0.5.1
torchvision==0.10.1
tqdm==4.62.3
MEMTrack/scripts/inference.sh
ADDED
@@ -0,0 +1,28 @@
#!/bin/bash
set -e
exp_name="collagen_motility_inference" # exp name
data_path="/home/medha/BacteriaDetectionTracking/MEMTrack/DataFeatures/" # feature directory path
data_path+=$exp_name
echo $data_path

# paths to saved models
low_motility_model_path="/home/medha/BacteriaDetectionTracking/MEMTrack/models/motility/low/collagen_optical_flow_median_bkg_more_data_90k/"
wiggle_motility_model_path="/home/medha/BacteriaDetectionTracking/MEMTrack/models/motility/wiggle/collagen_optical_flow_median_bkg_more_data_90k/"
mid_motility_model_path="/home/medha/BacteriaDetectionTracking/MEMTrack/models/motility/mid/collagen_optical_flow_median_bkg_more_data_90k/"
high_motility_model_path="/home/medha/BacteriaDetectionTracking/MEMTrack/models/motility/high/collagen_optical_flow_median_bkg_more_data_90k/"

# update test video numbers from the video map
for video_num in 2
do
    # generate testing files for all motilities
    python src/inferenceBacteriaRetinanet_Motility_v2.py --output_dir $low_motility_model_path --annotations_test "All" --video $video_num --test_dir $data_path

    python src/inferenceBacteriaRetinanet_Motility_v2.py --output_dir $mid_motility_model_path --annotations_test "Motility-mid" --video $video_num --test_dir $data_path

    python src/inferenceBacteriaRetinanet_Motility_v2.py --output_dir $high_motility_model_path --annotations_test "Motility-high" --video $video_num --test_dir $data_path

    python src/inferenceBacteriaRetinanet_Motility_v2.py --output_dir $low_motility_model_path --annotations_test "Motility-low" --video $video_num --test_dir $data_path
done
MEMTrack/scripts/run.sh
ADDED
@@ -0,0 +1,14 @@
#!/bin/bash

set -e

# RUN ONLY INFERENCE WITH EVAL (GT DATA)
bash scripts/inference.sh
# run with plot and plot gt
bash scripts/tracking.sh
bash scripts/test_set_eval.sh

# RUN ONLY INFERENCE, NO EVAL (NO GT DATA)
bash scripts/inference_no_eval.sh
# # run only with plot
# bash scripts/tracking.sh
MEMTrack/scripts/test_set_eval.sh
ADDED
@@ -0,0 +1,19 @@
#!/bin/bash
set -e

exp_name="collagen_motility_optical_flow_median_bkg_more_data/" # exp name
source_path="/data/medha/Bacteria"
data_path="$source_path/DataFeatures/$exp_name" # path to feature directory
echo $data_path

video_map_path="$source_path/Data/videomap.txt" # path to video map
echo $video_map_path

test_results_path="$data_path/test_set_results*.txt" # path to test result output files

rm -rf $test_results_path

for video_num in 141 148 153 160 167 170
do
    python src/evaluation_step_wise_motility.py --video_map_path $video_map_path --data_path $data_path --video $video_num
done
MEMTrack/scripts/tracking.sh
ADDED
@@ -0,0 +1,20 @@
#!/bin/bash
set -e

exp_name="collagen_motility_optical_flow_median_bkg_more_data/"
data_path="/data/medha/Bacteria/DataFeatures/"
data_path+=$exp_name
echo $data_path
video_map_path="/home/medha/BacteriaDetectionTracking/MEMTrack/data/videomap.txt"
data_root_path="/home/medha/BacteriaDetectionTracking/MEMTrack/data/"

for video_num in 141 148 153 160 167 170
do
    python src/GenerateTrackingData.py --filter_thresh 0.3 --video_num $video_num --data_path $data_path
    python src/Tracking.py --video_num $video_num --data_path $data_path
    # plot predictions and gt
    python src/TrackingAnalysis.py --video_num $video_num --data_feature_path $data_path --video_map_path $video_map_path --data_root_path $data_root_path #--plot --plot_gt
    # python src/GenerateVideo.py --video_num $video_num --fps 1 --data_path $data_path
    # python src/GenerateVideo.py --video_num $video_num --fps 60 --data_path $data_path
done
MEMTrack/scripts/train.sh
ADDED
@@ -0,0 +1,27 @@
#!/bin/bash
set -e

# experiment and data settings
exp_name="collagen_motility_optical_flow_consecutive_median_bkg" # exp name
data_path="/DataFeatures/" # path to data features dir
data_path+="$exp_name/data_feature_optical_flow_median_back_2pyr_18win_background_img/"
echo $data_path

output_dir="/alldata/medha/CleanCodeData/training-output/" # output dir to store models
output_dir+=$exp_name

low_output_dir="$output_dir/low_90k_00125"
high_output_dir="$output_dir/high_90k_00125"
mid_output_dir="$output_dir/mid_90k_00125"
wiggle_output_dir="$output_dir/wiggle_90k_00125"

python src/trainBacteriaRetinanetMotionData_Motility.py --source_path $data_path --output_dir $low_output_dir --annotations_train "Motility-low" --annotations_test "Motility-low" --bbox_size 31 --lr "0.00125" --epochs "90000"

python src/trainBacteriaRetinanetMotionData_Motility.py --source_path $data_path --output_dir $high_output_dir --annotations_train "Motility-high" --annotations_test "Motility-high" --bbox_size 31 --lr "0.00125" --epochs "90000"

python src/trainBacteriaRetinanetMotionData_Motility.py --source_path $data_path --output_dir $mid_output_dir --annotations_train "Motility-mid" --annotations_test "Motility-mid" --bbox_size 31 --lr "0.00125" --epochs "90000"

python src/trainBacteriaRetinanetMotionData_Motility.py --source_path $data_path --output_dir $wiggle_output_dir --annotations_train "Motility-wiggle" --annotations_test "Motility-wiggle" --bbox_size 31 --lr "0.00125" --epochs "90000"
MEMTrack/src/GenerateTrackingData.py
ADDED
@@ -0,0 +1,248 @@
import json
import cv2
import os
import shutil
import argparse
import itertools
from natsort import natsorted

def combine_predictions(predictions_json_path, op_path):
    all_preds = []
    tracking_info_predictions_path = op_path
    for path, diff in predictions_json_path:
        if os.path.exists(path):
            predictions_json = open(path)
            predictions = json.load(predictions_json)
            for pred in predictions:
                pred["diff"] = diff
                all_preds.append(pred)
    all_preds.sort(key=lambda json: json['image_id'])
    with open(tracking_info_predictions_path, "w") as track_info_file:
        json.dump(all_preds, track_info_file)

def filter_boxes_size(tracking_info_predictions_path, op_path):
    tracking_info_predictions_filetered_path = op_path
    # filter boxes by size to remove extremely big boxes
    combined_predictions_json = open(tracking_info_predictions_path)
    combined_predictions = json.load(combined_predictions_json)
    combined_predictions_filtered = []
    for pred in combined_predictions:
        width = pred["bbox"][2]
        height = pred["bbox"][3]
        if (width > 35) or (height > 35):
            continue
        else:
            combined_predictions_filtered.append(pred)
    with open(tracking_info_predictions_filetered_path, "w") as track_info_file:
        json.dump(combined_predictions_filtered, track_info_file)
    specific_file = tracking_info_predictions_filetered_path.split("filtered.json")[0] + "filter_box_size" + ".json"
    shutil.copy(tracking_info_predictions_filetered_path, specific_file)

def filter_conf_score(tracking_info_predictions_path, op_path):
    tracking_info_predictions_filetered_path = op_path
    # filter boxes by per-motility confidence score
    combined_predictions_json = open(tracking_info_predictions_path)
    combined_predictions = json.load(combined_predictions_json)
    combined_predictions_filtered = []
    for pred in combined_predictions:
        score = pred["score"]
        # if pred["diff"] == "Motility-low" and score < 0.2:
        #     continue
        if pred["diff"] == "Motility-wiggle" and score < 0.99:
            continue
        elif pred["diff"] == "Motility-mid" and score < 0.99:
            continue
        elif pred["diff"] == "Motility-high" and score < 0.99:
            continue
        else:
            combined_predictions_filtered.append(pred)
    with open(tracking_info_predictions_filetered_path, "w") as track_info_file:
        json.dump(combined_predictions_filtered, track_info_file)
    specific_file = tracking_info_predictions_filetered_path.split("filtered.json")[0] + "filter_conf_score" + ".json"
    shutil.copy(tracking_info_predictions_filetered_path, specific_file)

def nms_filter(tracking_info_predictions_path, op_path, iou_thresh_nms):
    tracking_info_predictions_filetered_path = op_path
    # filter boxes using nms to remove near-duplicate boxes with lower confidence score
    combined_predictions_json = open(tracking_info_predictions_path)
    combined_predictions = json.load(combined_predictions_json)
    nms_filtered_preds = []
    for image_id, preds in itertools.groupby(combined_predictions, key=lambda k: k["image_id"]):
        img_preds = []
        for pred in preds:
            img_preds.append(pred)
        filtered_preds = nms_helper(img_preds, iou_thresh_nms)
        nms_filtered_preds.extend(filtered_preds)
    with open(tracking_info_predictions_filetered_path, "w") as track_info_file:
        json.dump(nms_filtered_preds, track_info_file)
    specific_file = tracking_info_predictions_filetered_path.split("filtered.json")[0] + "filter_nms" + ".json"
    shutil.copy(tracking_info_predictions_filetered_path, specific_file)


def nms_helper(combined_predictions, iou_thresh_nms):
    iou_thresh_nms = 0.70
    final_preds = []
    # get iou of all boxes wrt all boxes
    iou_mat = get_iou_mat_image(combined_predictions, combined_predictions)
    # get matching box pairings
    matching_boxes_iou = get_matching_boxes_iou(combined_predictions, iou_mat, iou_thresh=iou_thresh_nms)
    # run nms while loop only if overlapping boxes exist
    while (len(matching_boxes_iou) > 0):
        # sort the list acc to confidence score
        sorted_bbox_list = sorted(combined_predictions, key=lambda k: k["score"])
        # get iou of all boxes wrt to box with max conf score
        iou_mat = get_iou_mat_image(sorted_bbox_list[:-1], [sorted_bbox_list[-1]])
        preds_temp = []
        for index, iou in enumerate(iou_mat):
            if iou[0] < iou_thresh_nms:
                preds_temp.append(sorted_bbox_list[index])
        final_preds.append(sorted_bbox_list[-1])
        combined_predictions = preds_temp
        iou_mat = get_iou_mat_image(combined_predictions, combined_predictions)
        matching_boxes_iou = get_matching_boxes_iou(combined_predictions, iou_mat, iou_thresh=iou_thresh_nms)
    return final_preds

# Calculate IoU of 2 bounding boxes
def bb_intersection_over_union(boxA, boxB):
    # determine the (x, y)-coordinates of the intersection rectangle
    xA = max(boxA[0], boxB[0])
    yA = max(boxA[1], boxB[1])
    xB = min(boxA[2], boxB[2])
    yB = min(boxA[3], boxB[3])

    # compute the area of intersection rectangle
    interArea = abs(max((xB - xA, 0)) * max((yB - yA), 0))
    if interArea == 0:
        return 0

    # compute the area of both the prediction and ground-truth
    # rectangles
    boxAArea = abs((boxA[2] - boxA[0]) * (boxA[3] - boxA[1]))
    boxBArea = abs((boxB[2] - boxB[0]) * (boxB[3] - boxB[1]))

    # compute the intersection over union by taking the intersection
    # area and dividing it by the sum of prediction + ground-truth
    # areas - the intersection area
    iou_box = interArea / float(boxAArea + boxBArea - interArea)

    # return the intersection over union value
    return iou_box

def get_iou_mat_image(preds_list1, preds_list2):
    iou_mat = []
    for pred1 in preds_list1:
        box1 = pred1["bbox"]
        iou_row = []
        for pred2 in preds_list2:
            box2 = pred2["bbox"]
            box1_xyxy = [box1[0], box1[1], box1[0] + box1[2], box1[1] + box1[3]]
            box2_xyxy = [box2[0], box2[1], box2[0] + box2[2], box2[1] + box2[3]]
            iou_boxes = bb_intersection_over_union(box1_xyxy, box2_xyxy)
            iou_row.append(iou_boxes)
        iou_mat.append(iou_row)
    return iou_mat

def get_matching_boxes_iou(preds_list, iou_mat, iou_thresh):
    matching_iou_boxes = []
    for i in range(0, len(preds_list)):
        iou_row_max = max(iou_mat[i])
        iou_row_max_pred_id = iou_mat[i].index(iou_row_max)
        if iou_row_max > iou_thresh:
            matching_iou_boxes.append([i, iou_row_max_pred_id, iou_row_max])
    return matching_iou_boxes

def gen_tracking_data_(video_num, predictions_json_path=None, op_path=None):
    predictions_json = open(predictions_json_path)
    predictions = json.load(predictions_json)
    try:
        print(predictions[0])
    except:
        print("No predictions")
    tracking_info_predictions_path = op_path + f"./video{video_num}_predictions.json"
    instance_dict = {}
    tracking_info_dict = {}
    instances_image = []
    count = 0
    prev_image_id = predictions[0]['image_id']
    prev_image_name = ""
    image_id = 0
    for image_id, preds in itertools.groupby(predictions, key=lambda k: k["image_id"]):
        instances_image = []
        image_name = str(image_id) + ".tif"
        for prediction in preds:
            score = prediction['score']
            if score > 0.0:
                x, y, w, h = prediction['bbox']
                x2 = x + w
                y2 = y + h
                instance_bbox = [x, y, x2, y2]
                instace_dict = {'bbox': instance_bbox, 'labels': prediction['category_id'], 'scores': prediction['score'], 'diff': prediction['diff']}
                instances_image.append(instace_dict)
        tracking_info_dict[image_name] = instances_image
    with open(tracking_info_predictions_path, "w") as track_info_file:
        json.dump(tracking_info_dict, track_info_file)


def gen_tracking_data(video_num, data_path=None, custom_test_dir=None, filter_thresh=0):
    combined_pred_path = "coco_instances_results_combined.json"
    combined_pred_filtered_path = "coco_instances_results_combined_filtered.json"

    video_path = f"data_video{video_num}_feature_optical_flow_median_back_2pyr_18win_background_img/test/"
    paths = []
    if custom_test_dir:
        data_path = custom_test_dir
        video_path = ""

    for diff in ["Motility-mid", "Motility-wiggle", "Motility-high"]:
        paths.append((os.path.join(data_path, video_path, f"coco_instances_results_{diff}.json"), diff))

    op_path0 = os.path.join(data_path, video_path, combined_pred_path)
    op_path1 = os.path.join(data_path, video_path, combined_pred_filtered_path)

    combine_predictions(paths, op_path0)

    filter_boxes_size(op_path0, op_path=op_path1)
    filter_conf_score(op_path1, op_path=op_path1)
    nms_filter(op_path1, op_path=op_path1, iou_thresh_nms=filter_thresh)

    gen_tracking_data_(video_num, op_path1, os.path.join(data_path, video_path))


if __name__ == "__main__":

    ap = argparse.ArgumentParser(description='Training')
    ap.add_argument('--filter_thresh', default="0.0", type=float, metavar='THRESH')
    ap.add_argument('--video_num', default="", type=str, metavar='VIDEO')
    ap.add_argument('--data_path', default="19", type=str, metavar='PATH')
    ap.add_argument('--custom_test_dir', type=str, metavar='CELL PATH')
    args = ap.parse_args()
    filter_thresh = args.filter_thresh
    video_num = args.video_num
    data_path = args.data_path
    custom_test_dir = args.custom_test_dir

    gen_tracking_data(video_num, data_path, custom_test_dir, filter_thresh)
MEMTrack/src/GenerateVideo.py
ADDED
@@ -0,0 +1,48 @@
import cv2
import os
import argparse
from natsort import natsorted

def create_video(data_dir, image_dir, video_name, fps):
    # choose codec according to format needed
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    img_sample = cv2.imread(os.path.join(image_dir, "0.png"))
    height, width, channels = img_sample.shape

    video = cv2.VideoWriter(data_dir + video_name + ".mp4", fourcc, fps, (width, height))
    for frame in natsorted(os.listdir(image_dir)):
        img = cv2.imread(os.path.join(image_dir, frame))
        video.write(img)
    video.release()

def gen_tracking_video(video_num, fps=60, data_path=None, custom_test_dir=None):
    if custom_test_dir:
        data_dir = custom_test_dir
    else:
        data_dir = data_path + f"/data_video{video_num}_feature_optical_flow_median_back_2pyr_18win_background_img/"
    image_dir = data_dir + "/test/tracklets-filtered/"
    video_name = f'video{video_num}-tracklets-filtered-{fps}'
    create_video(data_dir, image_dir, video_name, fps)
    return os.path.join(data_dir, video_name) + ".mp4"

if __name__ == "__main__":
    ap = argparse.ArgumentParser(description='Training')
    ap.add_argument('--video_num', default="", type=str, metavar='VIDEO')
    ap.add_argument('--fps', default=1, type=int, metavar='FPS')
    ap.add_argument('--data_path', default="19", type=str, metavar='PATH')
    ap.add_argument('--custom_test_dir', type=str, metavar='CELL PATH')
    args = ap.parse_args()

    video_num = args.video_num
    fps = args.fps
    data_path = args.data_path
    custom_test_dir = args.custom_test_dir

    gen_tracking_video(video_num, fps=fps, data_path=data_path, custom_test_dir=custom_test_dir)
MEMTrack/src/LossEvalHook.py
ADDED
@@ -0,0 +1,70 @@
from detectron2.engine.hooks import HookBase
from detectron2.evaluation import inference_context
from detectron2.utils.logger import log_every_n_seconds
from detectron2.data import DatasetMapper, build_detection_test_loader
import detectron2.utils.comm as comm
import torch
import time
import datetime
import logging
import numpy as np

class LossEvalHook(HookBase):
    def __init__(self, eval_period, model, data_loader):
        self._model = model
        self._period = eval_period
        self._data_loader = data_loader

    def _do_loss_eval(self):
        # Copying inference_on_dataset from evaluator.py
        total = len(self._data_loader)
        num_warmup = min(5, total - 1)

        start_time = time.perf_counter()
        total_compute_time = 0
        losses = []
        for idx, inputs in enumerate(self._data_loader):
            if idx == num_warmup:
                start_time = time.perf_counter()
                total_compute_time = 0
            start_compute_time = time.perf_counter()
            if torch.cuda.is_available():
                torch.cuda.synchronize()
            total_compute_time += time.perf_counter() - start_compute_time
            iters_after_start = idx + 1 - num_warmup * int(idx >= num_warmup)
            seconds_per_img = total_compute_time / iters_after_start
            if idx >= num_warmup * 2 or seconds_per_img > 5:
                total_seconds_per_img = (time.perf_counter() - start_time) / iters_after_start
                eta = datetime.timedelta(seconds=int(total_seconds_per_img * (total - idx - 1)))
                log_every_n_seconds(
                    logging.INFO,
                    "Loss on Validation done {}/{}. {:.4f} s / img. ETA={}".format(
                        idx + 1, total, seconds_per_img, str(eta)
                    ),
                    n=5,
                )
            loss_batch = self._get_loss(inputs)
            losses.append(loss_batch)
        mean_loss = np.mean(losses)
        self.trainer.storage.put_scalar('validation_loss', mean_loss)
        comm.synchronize()

        return losses

    def _get_loss(self, data):
        # How loss is calculated on train_loop
        metrics_dict = self._model(data)
        metrics_dict = {
            k: v.detach().cpu().item() if isinstance(v, torch.Tensor) else float(v)
            for k, v in metrics_dict.items()
        }
        total_losses_reduced = sum(loss for loss in metrics_dict.values())
        return total_losses_reduced


    def after_step(self):
        next_iter = self.trainer.iter + 1
        is_final = next_iter == self.trainer.max_iter
        if is_final or (self._period > 0 and next_iter % self._period == 0):
            self._do_loss_eval()
        self.trainer.storage.put_scalars(timetest=12)
MEMTrack/src/Tracking.py
ADDED
@@ -0,0 +1,114 @@
import json
import matplotlib
import matplotlib.pylab as plt
from contextlib import suppress
from sort import Sort
import collections
from pprint import pprint
from sort import *
import os
import cv2
import numpy as np
import shutil
import argparse
from natsort import natsorted

def track_bacteria(video_num, max_age, max_interpolation, op_path=None, custom_test_dir=None, data_path=None):
    mot_tracker = Sort(max_age=max_age, min_hits=0, iou_threshold=0.05, max_interpolation=max_interpolation)
    if custom_test_dir:
        op_path = custom_test_dir
        video_num = ""
    else:
        op_path = data_path + f"/data_video{video_num}_feature_optical_flow_median_back_2pyr_18win_background_img/test/"

    jsonpath = op_path + f"./video{video_num}_predictions.json"
    save_path = op_path + f"./video{video_num}-tracklets/"
    tracking_predictions_path = op_path + f"./video{video_num}_tracking_predictions.json"
    img_path = op_path + "/images/"

    with open(jsonpath) as data_file:
        data = json.load(data_file)
        odata = collections.OrderedDict(sorted(data.items()))
    print(jsonpath)

    print(img_path)
    shutil.rmtree(save_path, ignore_errors=True)
    os.makedirs(save_path, exist_ok=True)

    tracking_predictions = []

    for key in natsorted(odata.keys()):
        arrlist = []
        det_img = cv2.imread(os.path.join(img_path, key))
        height, width, channels = det_img.shape
        overlay = det_img.copy()
        det_result = data[key]

        for info in det_result:
            bbox = info['bbox']
            # add filter bbox for size
            labels = info['labels']
            scores = info['scores']
            templist = bbox + [scores]
            arrlist.append(templist)

        track_bbs_ids = mot_tracker.update(np.array(arrlist))

        mot_imgid = key.replace('.tif', '')
        newname = save_path + mot_imgid + '_mot.jpg'

        for j in range(track_bbs_ids.shape[0]):
            ele = track_bbs_ids[j, :]
            x = int(ele[0])
            y = int(ele[1])
            x2 = int(ele[2])
            y2 = int(ele[3])
            x_cen = x + int((x2 - x) / 2)
            y_cen = y + int((y2 - y) / 2)
            track_label = str(int(ele[4]))
            if x_cen >= width:
                continue
            if y_cen >= height:
                continue
            cv2.drawMarker(det_img, (x_cen + 5, y_cen + 5), (0, 255, 255), markerType=cv2.MARKER_CROSS,
                           markerSize=10, thickness=1, line_type=cv2.LINE_AA)
            cv2.putText(det_img, '#' + track_label, (x, y - 4), 0, 0.4, (0, 255, 255), thickness=1)
            w = x2 - x
            h = y2 - y
            bbox = [x, y, w, h]
            instance_prediction = {'image_id': int(mot_imgid), 'category_id': 0, 'bbox': bbox, 'score': -1, 'track_label': track_label}
            tracking_predictions.append(instance_prediction)

        cv2.imwrite(newname, det_img)


    with open(tracking_predictions_path, "w") as track_preds_file:
        json.dump(tracking_predictions, track_preds_file)

if __name__ == "__main__":

    ap = argparse.ArgumentParser(description="")
    ap.add_argument('--video_num', default="", type=str, metavar='VIDEO')
    ap.add_argument('--data_path', default="19", type=str, metavar='PATH')
    ap.add_argument('--custom_test_dir', default="", type=str, metavar='CELL PATH')
    ap.add_argument('--max_age', type=int, metavar='CELL PATH', default=25)
    ap.add_argument('--max_interpolation', type=int, metavar='CELL PATH', default=25)

    args = ap.parse_args()
    video_num = args.video_num
    custom_test_dir = args.custom_test_dir
    data_path = args.data_path

    max_age = args.max_age
    max_interpolation = args.max_interpolation

    track_bacteria(video_num, max_age, max_interpolation, custom_test_dir=custom_test_dir, data_path=data_path)
MEMTrack/src/TrackingAnalysis.py
ADDED
@@ -0,0 +1,464 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
import seaborn as sns
|
3 |
+
import matplotlib.pyplot as plt
|
4 |
+
import numpy as np
|
5 |
+
import cv2
|
6 |
+
import os
|
7 |
+
import glob
|
8 |
+
import shutil
|
9 |
+
import pandas as pd
|
10 |
+
import argparse
|
11 |
+
from natsort import natsorted
|
12 |
+
|
13 |
+
def filter_boxes_size(tracking_data):
|
14 |
+
# filter boxes by size to remove extremely big boxes
|
15 |
+
tracking_data_filtered = []
|
16 |
+
for tracked_box in tracking_data:
|
17 |
+
width = tracked_box["bbox"][2]
|
18 |
+
# print("w", width)
|
19 |
+
# print("h", height)
|
20 |
+
height = tracked_box["bbox"][3]
|
21 |
+
if (width > 35) or (height > 35):
|
22 |
+
# print(width)
|
23 |
+
# print(height)
|
24 |
+
#print("removed")
|
25 |
+
#tracking_data.remove(tracked_box)
|
26 |
+
continue
|
27 |
+
else:
|
28 |
+
tracking_data_filtered.append(tracked_box)
|
29 |
+
return tracking_data_filtered
|
30 |
+
|
31 |
+
|
32 |
+
def groupby_trackid(tracking_data):
|
33 |
+
#generate dict to contains tracked boxes per track id
|
34 |
+
track_id_details = {}
|
35 |
+
for tracked_box in tracking_data:
|
36 |
+
#print(tracked_box["track_label"])
|
37 |
+
track_label = tracked_box["track_label"]
|
38 |
+
#print("track_label", track_label)
|
39 |
+
if track_label not in track_id_details.keys():
|
40 |
+
#print("adding to dict")
|
41 |
+
track_id_details[track_label] = []
|
42 |
+
track_id_details[track_label].append(tracked_box)
|
43 |
+
print("----------------------------------------------------------")
|
44 |
+
print("sample track id list in dict: ", track_id_details[track_label][:2])
|
45 |
+
print("----------------------------------------------------------")
|
46 |
+
print("dict track id keys: ", track_id_details.keys())
|
47 |
+
print("----------------------------------------------------------")
|
48 |
+
return track_id_details
|
49 |
+
|
50 |
+
def analyze_track_preds(track_id_details, filter_length=False, min_con_length=11):
|
51 |
+
#get avg frames per track
|
52 |
+
#get num of fragments
|
53 |
+
#get fragments length
|
54 |
+
if filter_length == True:
|
55 |
+
track_id_filtered_length = track_id_details.copy()
|
56 |
+
num_frames = []
|
57 |
+
num_fragments = []
|
58 |
+
fragment_lengths = []
|
59 |
+
skipped_frames = []
|
60 |
+
for track_id in track_id_details.keys():
|
61 |
+
num_frames_track = 0
|
62 |
+
num_fragments_track = 0
|
63 |
+
prev_image_id = 0
|
64 |
+
prev_fragment_length = 0
|
65 |
+
new_image = True
|
66 |
+
frags_len_track = []
|
67 |
+
skip_frames_track = []
|
68 |
+
skipped_frames_track = 0
|
69 |
+
#print("new track: ", track_id)
|
70 |
+
tracked_boxes = track_id_details[track_id]
|
71 |
+
#print(tracked_boxes)
|
72 |
+
for tracked_box in tracked_boxes:
|
73 |
+
#print("box", tracked_box)
|
74 |
+
image_id = tracked_box["image_id"]
|
75 |
+
num_frames_track += 1
|
76 |
+
#print("num frames: ", num_frames_track)
|
77 |
+
if new_image == True:
|
78 |
+
new_image = False
|
79 |
+
num_fragments_track +=1
|
80 |
+
prev_image_id = image_id
|
81 |
+
fragment_start = image_id
|
82 |
+
|
83 |
+
#print("num fragments: ", num_fragments_track)
|
84 |
+
else:
|
85 |
+
if prev_image_id + 1 == image_id:
|
86 |
+
prev_fragment_length = prev_image_id - fragment_start + 1
|
87 |
+
prev_image_id +=1
|
88 |
+
|
89 |
+
else:
|
90 |
+
#print("image_id: ", image_id)
|
91 |
+
#print("prev_image_id: ", prev_image_id)
|
92 |
+
skipped_frames_track = image_id-prev_image_id-1
|
93 |
+
skip_frames_track.append(skipped_frames_track)
|
94 |
+
prev_fragment_length = prev_image_id - fragment_start + 1
|
95 |
+
frags_len_track.append(prev_fragment_length)
|
96 |
+
num_fragments_track +=1
|
97 |
+
fragment_start = image_id
|
98 |
+
prev_image_id = image_id
|
99 |
+
#print("num fragments: ", num_fragments_track)
|
100 |
+
prev_fragment_length = prev_image_id - fragment_start + 1
|
101 |
+
frags_len_track.append(prev_fragment_length)
|
102 |
+
if num_fragments_track == 1:
|
103 |
+
skip_frames_track.append(skipped_frames_track)
|
104 |
+
num_frames.append(num_frames_track)
|
105 |
+
if num_frames_track< min_con_length and filter_length is True:
|
106 |
+
track_id_filtered_length.pop(track_id, None)
|
107 |
+
#print("removing")
|
108 |
+
num_fragments.append(num_fragments_track)
|
109 |
+
fragment_lengths.append(frags_len_track)
|
110 |
+
skipped_frames.extend(skip_frames_track)
|
111 |
+
if filter_length is True:
|
112 |
+
return num_frames, num_fragments, fragment_lengths, skipped_frames, track_id_filtered_length
|
113 |
+
return num_frames, num_fragments, fragment_lengths, skipped_frames
|
114 |
+
|
115 |
+
def groupby_imageid(tracking_data):
|
116 |
+
#generate dict that contains tracked boxes per image id
|
117 |
+
image_id_details = {}
|
118 |
+
for track_label in tracking_data:
|
119 |
+
for tracked_box in tracking_data[track_label]:
|
120 |
+
image_id = tracked_box["image_id"]
|
121 |
+
if image_id not in image_id_details.keys():
|
122 |
+
#print("adding to dict")
|
123 |
+
image_id_details[image_id] = []
|
124 |
+
image_id_details[image_id].append(tracked_box)
|
125 |
+
try:
|
126 |
+
|
127 |
+
print("----------------------------------------------------------")
|
128 |
+
print("sample image id list in dict: ", image_id_details[image_id][:2])
|
129 |
+
print("----------------------------------------------------------")
|
130 |
+
print("dict image id keys: ", image_id_details.keys())
|
131 |
+
print("----------------------------------------------------------")
|
132 |
+
except:
|
133 |
+
print("Error in print statements")
|
134 |
+
return image_id_details
|
135 |
+
|
136 |
+
|
137 |
+
|
138 |
+
def analyse_tracking(video_num, min_track_length=60, custom_test_dir=None, data_feature_path=None,
|
139 |
+
video_map_path="data/videomap.txt", data_root_path="", plot=True, plot_gt=False):
|
140 |
+
#tracking-results:
|
141 |
+
if custom_test_dir:
|
142 |
+
op_path = custom_test_dir
|
143 |
+
video_num= ""
|
144 |
+
else:
|
145 |
+
op_path = data_feature_path + f"/data_video{video_num}_feature_optical_flow_median_back_2pyr_18win_background_img/test/"
|
146 |
+
tracking_predictions_path = op_path + f"./video{video_num}_tracking_predictions.json"
|
147 |
+
|
148 |
+
video_map_path = video_map_path
|
149 |
+
video_map = open(video_map_path,'r',)
|
150 |
+
header = [x.strip() for x in (video_map.readline().strip()).split(",")]
|
151 |
+
video_num_id = header.index("video_num")
|
152 |
+
strain_id = header.index("strain")
|
153 |
+
strain_map = {}
|
154 |
+
for line in video_map.readlines():
|
155 |
+
line_details = [x.strip() for x in line.split(",")]
|
156 |
+
video_id = int(line_details[video_num_id])
|
157 |
+
strain = line_details[strain_id]
|
158 |
+
strain_map[video_id] = strain
|
159 |
+
print(strain_map)
|
160 |
+
|
161 |
+
with open(tracking_predictions_path) as data_file:
|
162 |
+
tracking_data = json.load(data_file)
|
163 |
+
print("Total tracking data: ", len(tracking_data))
|
164 |
+
|
165 |
+
tracking_data_filtered = tracking_data
|
166 |
+
|
167 |
+
|
168 |
+
track_id_details = groupby_trackid(tracking_data_filtered)
|
169 |
+
num_frames, num_fragments, fragment_lengths, skipped_frames, track_id_filtered_length =analyze_track_preds(track_id_details,filter_length=True,min_con_length=min_track_length)
|
170 |
+
|
171 |
+
try:
|
172 |
+
print("Number of unique track ids: ", len(num_frames))
|
173 |
+
print("----------------------------------------------------------")
|
174 |
+
print("Average number of frames per track id: ", round(np.mean(num_frames)))
|
175 |
+
print("Maximum number of frames per track id: ", np.max(num_frames))
|
176 |
+
print("Minimum number of frames per track id: ", np.min(num_frames))
|
177 |
+
print("Median number of frames per track id: ", np.median(num_frames))
|
178 |
+
print("----------------------------------------------------------")
|
179 |
+
print("Average number of fragments per track id: ", round(np.mean(num_fragments)))
|
180 |
+
print("Maximum number of fragments per track id: ", np.max(num_fragments))
|
181 |
+
print("Minimum number of fragments per track id: ", np.min(num_fragments))
|
182 |
+
print("Median number of fragments per track id: ", np.median(num_fragments))
|
183 |
+
print("----------------------------------------------------------")
|
184 |
+
print("Average number of skipped frames per fragment: ", round(np.mean(skipped_frames)))
|
185 |
+
print("Maximum number of skipped frames per fragment: ", np.max(skipped_frames))
|
186 |
+
print("Minimum number of skipped frames per fragment: ", np.min(skipped_frames))
|
187 |
+
except:
|
188 |
+
print("Error in print statements")
|
189 |
+
|
190 |
+
num_frames, num_fragments, fragment_lengths, skipped_frames = analyze_track_preds(track_id_filtered_length)
|
191 |
+
|
192 |
+
try:
|
193 |
+
|
194 |
+
print("Number of unique track ids: ", len(num_frames))
|
195 |
+
print("----------------------------------------------------------")
|
196 |
+
print("Average number of frames per track id: ", round(np.mean(num_frames)))
|
197 |
+
print("Maximum number of frames per track id: ", np.max(num_frames))
|
198 |
+
print("Minimum number of frames per track id: ", np.min(num_frames))
|
199 |
+
print("Median number of frames per track id: ", np.median(num_frames))
|
200 |
+
print("----------------------------------------------------------")
|
201 |
+
print("Average number of fragments per track id: ", round(np.mean(num_fragments)))
|
202 |
+
print("Maximum number of fragments per track id: ", np.max(num_fragments))
|
203 |
+
print("Minimum number of fragments per track id: ", np.min(num_fragments))
|
204 |
+
print("Median number of fragments per track id: ", np.median(num_fragments))
|
205 |
+
print("----------------------------------------------------------")
|
206 |
+
print("Average number of skipped frames per fragment: ", round(np.mean(skipped_frames)))
|
207 |
+
print("Maximum number of skipped frames per fragment: ", np.max(skipped_frames))
|
208 |
+
print("Minimum number of skipped frames per fragment: ", np.min(skipped_frames))
|
209 |
+
except:
|
210 |
+
print("Error in print statements")
|
211 |
+
|
212 |
+
all_preds = []
|
213 |
+
if custom_test_dir:
|
214 |
+
final_preds_path = custom_test_dir
|
215 |
+
else:
|
216 |
+
final_preds_path = data_feature_path + f"/data_video{video_num}_feature_optical_flow_median_back_2pyr_18win_background_img/test/"
|
217 |
+
final_preds_path += "coco_instances_results_final.json"
|
218 |
+
for track_id in track_id_filtered_length.keys():
|
219 |
+
for pred in track_id_filtered_length[track_id]:
|
220 |
+
all_preds.append(pred)
|
221 |
+
with open(final_preds_path, "w") as track_info_file:
|
222 |
+
json.dump(all_preds, track_info_file)
|
223 |
+
|
224 |
+
|
225 |
+
|
226 |
+
image_id_filtered = groupby_imageid(track_id_filtered_length)
|
227 |
+
|
228 |
+
if plot :
|
229 |
+
#create images with labels plot
|
230 |
+
data_dir = data_root_path
|
231 |
+
data_sub_dirs = glob.glob(f'{data_dir}/*')
|
232 |
+
#print("data dirs: ", data_sub_dirs)
|
233 |
+
video_dirs = glob.glob(f'{data_dir}/*/*')
|
234 |
+
for video in video_dirs:
|
235 |
+
#print(video)
|
236 |
+
if str(video_num) in video:
|
237 |
+
print(video)
|
238 |
+
video_path = video
|
239 |
+
break
|
240 |
+
|
241 |
+
img_path = video_path + f"/frame1/images/"
|
242 |
+
if custom_test_dir:
|
243 |
+
save_path = custom_test_dir +"tracklets-filtered/"
|
244 |
+
else:
|
245 |
+
save_path = data_feature_path + f"/data_video{video_num}_feature_optical_flow_median_back_2pyr_18win_background_img/test/tracklets-filtered/"
|
246 |
+
shutil.rmtree(save_path, ignore_errors=True)
|
247 |
+
os.makedirs(save_path, exist_ok=True)
|
248 |
+
|
249 |
+
if plot_gt:
|
250 |
+
ground_truth_json_path = op_path + "/test_All.json"
|
251 |
+
ground_truth_json_path2 = op_path + "/test_Easy+Hard.json"
|
252 |
+
if os.path.exists(ground_truth_json_path):
|
253 |
+
gt_json = open(ground_truth_json_path)
|
254 |
+
ground_truth = json.load(gt_json)
|
255 |
+
else: # os.path.exists(ground_truth_json_path2):
|
256 |
+
gt_json = open(ground_truth_json_path2)
|
257 |
+
ground_truth = json.load(gt_json)
|
258 |
+
# else:
|
259 |
+
# ground_truth_json_path = args.data_path + f"/data_video{video_num}_feature_optical_flow_median_back_2pyr_18win_background_img/test/test_Hard.json"
|
260 |
+
# gt_json = open(ground_truth_json_path)
|
261 |
+
# ground_truth = json.load(gt_json)
|
262 |
+
|
263 |
+
#need frame id, bacteria track id and bbox
|
264 |
+
data_path = video_path + f"/frame1/bacteria/"
|
265 |
+
bacteria_dirs = os.listdir(data_path)
|
266 |
+
print(bacteria_dirs)
|
267 |
+
|
268 |
+
ground_truth_data_bacteria_track_specific = []
|
269 |
+
for bacteria in bacteria_dirs:
|
270 |
+
#print(bacteria)
|
271 |
+
for coord_txt in os.listdir(os.path.join(data_path,bacteria,"xy_coord")):
|
272 |
+
#print(coord_txt)
|
273 |
+
if not coord_txt.endswith(".ipynb_checkpoints"):
|
274 |
+
frame_id = int(coord_txt.split(".txt")[0])
|
275 |
+
coord_file = open(os.path.join(data_path,bacteria,"xy_coord", coord_txt),'r')
|
276 |
+
line = coord_file.readlines()
|
277 |
+
#print(line)
|
278 |
+
if len(line)>0:
|
279 |
+
x = float(line[0].split(" ")[0])
|
280 |
+
y = float(line[0].split(" ")[1])
|
281 |
+
# print(line)
|
282 |
+
# print(x)
|
283 |
+
# print(y)
|
284 |
+
#print(frame_id)
|
285 |
+
width = height = 30
|
286 |
+
factor_w = 1
|
287 |
+
factor_h = 1
|
288 |
+
x1 = int(x*factor_w - (width // 2))
|
289 |
+
y1 = int(y*factor_h - (height // 2))
|
290 |
+
w=h=30
|
291 |
+
bbox = [x1,y1,w,h]
|
292 |
+
entry = {'image_id': frame_id, 'track_label':int(bacteria), 'bbox': bbox}
|
293 |
+
ground_truth_data_bacteria_track_specific.append(entry)
|
294 |
+
track_id_ground_truth = groupby_trackid(ground_truth_data_bacteria_track_specific)
|
295 |
+
ground_truth = groupby_imageid(track_id_ground_truth)
|
296 |
+
|
297 |
+
|
298 |
+
if plot :
|
299 |
+
for image_id in image_id_filtered:
|
300 |
+
#print(image_id)
|
301 |
+
newname = save_path + str(image_id) + '.png'
|
302 |
+
det_img = cv2.imread(os.path.join(img_path,str(image_id))+".tif")
|
303 |
+
det_img_gt_only = det_img.copy()
|
304 |
+
det_img_p_only = det_img.copy()
|
305 |
+
|
306 |
+
height, width, channels = det_img.shape
|
307 |
+
#print (height, width, channels)
|
308 |
+
# plotting filtered predictions
|
309 |
+
for tracked_box in image_id_filtered[image_id]:
|
310 |
+
bbox = tracked_box["bbox"]
|
311 |
+
x = bbox[0]
|
312 |
+
y = bbox[1]
|
313 |
+
w = bbox[2]
|
314 |
+
h = bbox[3]
|
315 |
+
x_cen = x + int(w/2)
|
316 |
+
y_cen = y + int(h/2)
|
317 |
+
track_label = tracked_box["track_label"]
|
318 |
+
cv2.drawMarker(det_img, (x_cen , y_cen),(0,255,255), markerType=cv2.MARKER_CROSS,
|
319 |
+
markerSize=5, thickness=1, line_type=cv2.LINE_AA)
|
320 |
+
|
321 |
+
# print("Y:",y_cen)
|
322 |
+
# print("X:",x_cen)
|
323 |
+
if x_cen>660:
|
324 |
+
print("here")
|
325 |
+
if y_cen>490:
|
326 |
+
print("here")
|
327 |
+
cv2.putText(det_img, '#'+track_label, (x, y-6), 0,0.6,(0,255,255),thickness=1)
|
328 |
+
if plot_gt:
|
329 |
+
#plotting ground truth
|
330 |
+
try:
|
331 |
+
for ground_truth_box in ground_truth[image_id]:
|
332 |
+
bbox = ground_truth_box["bbox"]
|
333 |
+
x = bbox[0]
|
334 |
+
y = bbox[1]
|
335 |
+
w = bbox[2]
|
336 |
+
h = bbox[3]
|
337 |
+
x_cen = x + int(w/2)
|
338 |
+
y_cen = y + int(h/2)
|
339 |
+
if x_cen>660:
|
340 |
+
print(image_id)
|
341 |
+
print("here")
|
342 |
+
print("Y:",y_cen)
|
343 |
+
print("X:",x_cen)
|
344 |
+
if y_cen>490:
|
345 |
+
print(image_id)
|
346 |
+
print("here")
|
347 |
+
print("Y:",y_cen)
|
348 |
+
print("X:",x_cen)
|
349 |
+
track_label = ground_truth_box["track_label"]
|
350 |
+
if y_cen>490:
|
351 |
+
print(track_label)
|
352 |
+
cv2.drawMarker(det_img, (x_cen, y_cen),(255,255,0), markerType=cv2.MARKER_TILTED_CROSS,
|
353 |
+
markerSize=5, thickness=1, line_type=cv2.LINE_AA)
|
354 |
+
cv2.putText(det_img, '#'+str(track_label), (x, y+4), 0,0.6,(255,255,0),thickness=1)
|
355 |
+
except:
|
356 |
+
pass
|
357 |
+
cv2.imwrite(newname,det_img)
|
358 |
+
|
359 |
+
|
360 |
+
#image id wise csv
|
361 |
+
bacteria_count = 0
|
362 |
+
tracked_raw_data = pd.DataFrame(columns = ['Nr', 'TID', 'PID', 'x [pixel]', 'y [pixel]'])
|
363 |
+
for image_id in image_id_filtered.keys():
|
364 |
+
#print(image_id)
|
365 |
+
pid = image_id
|
366 |
+
for tracked_box in image_id_filtered[image_id]:
|
367 |
+
tracked_bacteria_data = []
|
368 |
+
tid = tracked_box["track_label"]
|
369 |
+
#print(tid)
|
370 |
+
bbox = tracked_box["bbox"]
|
371 |
+
x = bbox[0]
|
372 |
+
y = bbox[1]
|
373 |
+
w = bbox[2]
|
374 |
+
h = bbox[3]
|
375 |
+
x_cen = x + w/2
|
376 |
+
y_cen = y + h/2
|
377 |
+
bacteria_count +=1
|
378 |
+
tracked_bacteria_data ={"Nr" : bacteria_count,'TID' : tid, 'PID' : pid,
|
379 |
+
'x [pixel]' : x_cen, 'y [pixel]' : y_cen }
|
380 |
+
tracked_raw_data = tracked_raw_data.append(tracked_bacteria_data, ignore_index=True)
|
381 |
+
|
382 |
+
#track id wise csv raw data
|
383 |
+
bacteria_count = 0
|
384 |
+
tracked_raw_data = pd.DataFrame(columns = ['Nr', 'TID', 'PID', 'x [pixel]', 'y [pixel]'])
|
385 |
+
for track_id in natsorted(track_id_filtered_length.keys()):
|
386 |
+
tid = track_id
|
387 |
+
for tracked_box in track_id_filtered_length[track_id]:
|
388 |
+
tracked_bacteria_data = []
|
389 |
+
pid = tracked_box["image_id"]
|
390 |
+
#print(pid)
|
391 |
+
bbox = tracked_box["bbox"]
|
392 |
+
x = bbox[0]
|
393 |
+
y = bbox[1]
|
394 |
+
w = bbox[2]
|
395 |
+
h = bbox[3]
|
396 |
+
x_cen = x + w/2
|
397 |
+
y_cen = y + h/2
|
398 |
+
bacteria_count +=1
|
399 |
+
tracked_bacteria_data ={"Nr" : bacteria_count,'TID' : tid, 'PID' : pid,
|
400 |
+
'x [pixel]' : x_cen, 'y [pixel]' : y_cen }
|
401 |
+
tracked_raw_data = tracked_raw_data.append(tracked_bacteria_data, ignore_index=True)
|
402 |
+
|
403 |
+
if len(str(video_num))>0:
|
404 |
+
tracked_raw_data.to_csv(op_path+ f'TrackedRawData_{min_track_length}_video{video_num}_{strain_map[video_num]}.csv', index=False)
|
405 |
+
else:
|
406 |
+
tracked_raw_data.to_csv(op_path+ f'TrackedRawData_{min_track_length}_video{video_num}.csv', index=False)
|
407 |
+
|
408 |
+
#track id wise csv raw data
|
409 |
+
bacteria_count = 0
|
410 |
+
tracked_raw_data = pd.DataFrame(columns = ['Nr', 'TID', 'PID', 'x [pixel]', 'y [pixel]'])
|
411 |
+
for track_id in natsorted(track_id_details.keys()):
|
412 |
+
tid = track_id
|
413 |
+
for tracked_box in track_id_details[track_id]:
|
414 |
+
tracked_bacteria_data = []
|
415 |
+
pid = tracked_box["image_id"]
|
416 |
+
#print(pid)
|
417 |
+
bbox = tracked_box["bbox"]
|
418 |
+
x = bbox[0]
|
419 |
+
y = bbox[1]
|
420 |
+
w = bbox[2]
|
421 |
+
h = bbox[3]
|
422 |
+
x_cen = x + w/2
|
423 |
+
y_cen = y + h/2
|
424 |
+
bacteria_count +=1
|
425 |
+
tracked_bacteria_data ={"Nr" : bacteria_count,'TID' : tid, 'PID' : pid,
|
426 |
+
'x [pixel]' : x_cen, 'y [pixel]' : y_cen }
|
427 |
+
tracked_raw_data = tracked_raw_data.append(tracked_bacteria_data, ignore_index=True)
|
428 |
+
|
429 |
+
print("video_num:", video_num)
|
430 |
+
if len(str(video_num))>0:
|
431 |
+
tracked_raw_data.to_csv(op_path+ f'TrackedRawData_all_video{video_num}_{strain_map[video_num]}.csv', index=False)
|
432 |
+
else:
|
433 |
+
tracked_raw_data.to_csv(op_path+ f'TrackedRawData_all_video{video_num}.csv', index=False)
|
434 |
+
|
435 |
+
return op_path
|
436 |
+
|
437 |
+
|
438 |
+
|
439 |
+
if __name__ == "__main__":
|
440 |
+
ap = argparse.ArgumentParser(description='Tracking Analysis')
|
441 |
+
ap.add_argument('--video_num', default="", type=str, metavar='VIDEO')
|
442 |
+
ap.add_argument('--data_feature_path', default="19", type=str, metavar='PATH')
|
443 |
+
ap.add_argument('--data_root_path', default="19", type=str, metavar='PATH')
|
444 |
+
ap.add_argument('--plot_gt', action='store_true')
|
445 |
+
ap.add_argument('--plot', action='store_true')
|
446 |
+
ap.add_argument('--custom_test_dir', type=str, metavar='CELL PATH', default="")
|
447 |
+
ap.add_argument('--min_track_len', default = 60, type=int)
|
448 |
+
ap.add_argument('--video_map_path', default="data/videomap.txt", type=str, metavar='PATH')
|
449 |
+
|
450 |
+
args = ap.parse_args()
|
451 |
+
|
452 |
+
video_num = args.video_num
|
453 |
+
custom_test_dir = args.custom_test_dir
|
454 |
+
min_track_length = args.min_track_len
|
455 |
+
data_feature_path = args.data_feature_path
|
456 |
+
video_map_path = args.video_map_path
|
457 |
+
data_root_path = args.data_root_path
|
458 |
+
plot = args.plot
|
459 |
+
plot_gt = args.plot_gt
|
460 |
+
|
461 |
+
analyse_tracking(video_num, min_track_length=min_track_length, custom_test_dir=custom_test_dir,
|
462 |
+
data_feature_path=data_feature_path,
|
463 |
+
video_map_path=video_map_path, data_root_path=data_root_path,
|
464 |
+
plot=plot, plot_gt=plot_gt)
|
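A minimal usage sketch of the analysis entry point defined above, for reference; the directory arguments and the video id are placeholders, not paths shipped with the repo:

    from TrackingAnalysis import analyse_tracking

    # Filter tracklets shorter than 60 frames, write the filtered predictions and
    # the TrackedRawData CSVs, and return the output directory that was written to.
    op_path = analyse_tracking(
        video_num=19,                                                   # placeholder id from videomap.txt
        min_track_length=60,
        data_feature_path="DataFeatures/collagen_motility_inference",   # placeholder feature folder
        video_map_path="data/videomap.txt",
        data_root_path="data/collagen",                                 # placeholder raw-data folder
        plot=False,
    )
    print("analysis written to:", op_path)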
MEMTrack/src/__pycache__/GenerateTrackingData.cpython-38.pyc
ADDED
Binary file (6.56 kB). View file
|
|
MEMTrack/src/__pycache__/GenerateVideo.cpython-38.pyc
ADDED
Binary file (1.58 kB). View file
|
|
MEMTrack/src/__pycache__/Tracking.cpython-38.pyc
ADDED
Binary file (3.07 kB). View file
|
|
MEMTrack/src/__pycache__/TrackingAnalysis.cpython-38.pyc
ADDED
Binary file (9.55 kB). View file
|
|
MEMTrack/src/__pycache__/data_feature_gen.cpython-38.pyc
ADDED
Binary file (14 kB). View file
|
|
MEMTrack/src/__pycache__/data_prep_utils.cpython-38.pyc
ADDED
Binary file (11.1 kB). View file
|
|
MEMTrack/src/__pycache__/inferenceBacteriaRetinanet_Motility_v2.cpython-38.pyc
ADDED
Binary file (6.6 kB). View file
|
|
MEMTrack/src/__pycache__/sort.cpython-38.pyc
ADDED
Binary file (11.2 kB). View file
|
|
MEMTrack/src/data_feature_gen.py
ADDED
@@ -0,0 +1,628 @@
|
1 |
+
import zipfile
|
2 |
+
import openpyxl
|
3 |
+
import os
|
4 |
+
import tqdm
|
5 |
+
import csv
|
6 |
+
import cv2
|
7 |
+
import shutil
|
8 |
+
import PIL, PIL.ImageChops
|
9 |
+
import glob
|
10 |
+
import pandas as pd
|
11 |
+
import numpy as np
|
12 |
+
from natsort import natsorted
|
13 |
+
from PIL import Image
|
14 |
+
import argparse
|
15 |
+
|
16 |
+
|
17 |
+
def create_video(data_dir):
|
18 |
+
# choose codec according to format needed
|
19 |
+
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
|
20 |
+
#print(data_dir)
|
21 |
+
img_sample = cv2.imread(os.path.join(data_dir,"images/0.tif"))
|
22 |
+
#print(img_sample.shape)
|
23 |
+
height, width, channels = img_sample.shape
|
24 |
+
|
25 |
+
video = cv2.VideoWriter(data_dir + 'video.mp4', fourcc, 1, (width, height))
|
26 |
+
#data_dir = "./Data/video3/"
|
27 |
+
image_dir = os.path.join(data_dir, "images")
|
28 |
+
for frame in natsorted(os.listdir(image_dir)):
|
29 |
+
#print(frame)
|
30 |
+
img = cv2.imread(os.path.join(image_dir, frame))
|
31 |
+
video.write(img)
|
32 |
+
|
33 |
+
cv2.destroyAllWindows()
|
34 |
+
video.release()
|
35 |
+
|
36 |
+
|
37 |
+
|
38 |
+
def get_background(file_path, mean=True, sample=False):
|
39 |
+
if not os.path.exists(file_path):
|
40 |
+
create_video(file_path.rsplit("/",1)[0] +"/frame1")
|
41 |
+
cap = cv2.VideoCapture(file_path)
|
42 |
+
#print(cap.read())
|
43 |
+
# collect indices for all frames to compute the background (random sampling variant is commented out below)
|
44 |
+
#frame_indices = cap.get(cv2.CAP_PROP_FRAME_COUNT) * np.random.uniform(size=20)
|
45 |
+
frame_indices = list(range(0,int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) ))
|
46 |
+
print(len(frame_indices))
|
47 |
+
# we will store the frames in array
|
48 |
+
frames = []
|
49 |
+
for idx in frame_indices:
|
50 |
+
# set the frame id to read that particular frame
|
51 |
+
cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
|
52 |
+
ret, frame = cap.read()
|
53 |
+
#print(ret)
|
54 |
+
frames.append(frame)
|
55 |
+
if mean:
|
56 |
+
# calculate the mean
|
57 |
+
background_frame = np.mean(frames, axis=0).astype(np.uint8)
|
58 |
+
else:
|
59 |
+
# calculate the median
|
60 |
+
background_frame = np.median(frames, axis=0).astype(np.uint8)
|
61 |
+
if sample==True:
|
62 |
+
background_frame = cv2.imread("./Control_2_b0t5306c0x0-660y0-492.tiff")
|
63 |
+
#background_frame = cv2.imread("./RBS 2_1_b0t2791c0x0-660y0-492.tiff")
|
64 |
+
return background_frame
|
65 |
+
|
66 |
+
|
67 |
+
def get_absolute_consec_frame_diff_feature(video_path, max_consec_frame_diff=True, num_prior_frames=1):
|
68 |
+
cap = cv2.VideoCapture(video_path)
|
69 |
+
frames = []
|
70 |
+
differences = []
|
71 |
+
prev_frames = []
|
72 |
+
while True:
|
73 |
+
ret, frame = cap.read()
|
74 |
+
#print(frame)
|
75 |
+
if not ret:
|
76 |
+
break
|
77 |
+
#storing old frames till prior frame number
|
78 |
+
if num_prior_frames>0:
|
79 |
+
prev_frames.append(frame)
|
80 |
+
num_prior_frames -= 1
|
81 |
+
continue
|
82 |
+
#retrieving the previous xth frame
|
83 |
+
prev = prev_frames[0]
|
84 |
+
prev_frames.pop(0)
|
85 |
+
#computing frame diff between current and one previous
|
86 |
+
consecutive_diff = np.abs(frame - prev)
|
87 |
+
frames.append(frame) #creating frame list
|
88 |
+
#creating consecutive frame diff list
|
89 |
+
differences.append(consecutive_diff)
|
90 |
+
#creating consecutive frame diff features by taking the max at every pixel along the frame diff list
|
91 |
+
if max_consec_frame_diff:
|
92 |
+
max_abs_consec_diff_feature = np.max(differences, axis=0)
|
93 |
+
else:
|
94 |
+
max_abs_consec_diff_feature = np.min(differences, axis=0)
|
95 |
+
return max_abs_consec_diff_feature
|
96 |
+
|
97 |
+
|
98 |
+
|
99 |
+
def get_diff_from_absolute_consec_frame_diff_feature(image_path, frame_diff_feature, frame=False):
|
100 |
+
if frame==False:
|
101 |
+
image = PIL.Image.open(image_path).convert('L')
|
102 |
+
if frame == True:
|
103 |
+
#print("getting diff from frame")
|
104 |
+
image = Image.fromarray(image_path).convert('L')
|
105 |
+
L = image.getchannel(0)
|
106 |
+
frame_diff_feature = PIL.Image.fromarray(frame_diff_feature).convert('L')
|
107 |
+
frame_diff_feature.getbands()
|
108 |
+
L1 = frame_diff_feature.getchannel(0)
|
109 |
+
diff = PIL.ImageChops.difference(L, L1)
|
110 |
+
return diff
|
111 |
+
|
112 |
+
|
113 |
+
def get_absolute_all_frame_diff_feature(video_path, max_feature=True):
|
114 |
+
cap = cv2.VideoCapture(video_path)
|
115 |
+
frames = []
|
116 |
+
while True:
|
117 |
+
ret, frame = cap.read()
|
118 |
+
#print(frame)
|
119 |
+
if not ret:
|
120 |
+
break
|
121 |
+
frames.append(frame)
|
122 |
+
|
123 |
+
features = []
|
124 |
+
count = 0
|
125 |
+
for index in tqdm.tqdm(range(len(frames))):
|
126 |
+
differences = []
|
127 |
+
count+=1
|
128 |
+
frame = frames[index]
|
129 |
+
for index_1 in (range(len(frames))):
|
130 |
+
if index == index_1:
|
131 |
+
#print(count)
|
132 |
+
continue
|
133 |
+
frame1 = frames[index_1]
|
134 |
+
differences.append(np.abs(frame - frame1))
|
135 |
+
if max_feature:
|
136 |
+
max_diff_feature = np.max(differences, axis=0)
|
137 |
+
features.append(max_diff_feature)
|
138 |
+
else:
|
139 |
+
#print("min")
|
140 |
+
min_diff_feature = np.min(differences, axis=0)
|
141 |
+
features.append(min_diff_feature)
|
142 |
+
return features
|
143 |
+
|
144 |
+
|
145 |
+
|
146 |
+
#updated to include optical flow computation from x frames prior
|
147 |
+
def gen_dense_optical_flow_data(method, video_path, params=[], to_gray=False, median=False, median_frame=None, num_frames_prior=1):
|
148 |
+
frames_optical_flow = []
|
149 |
+
frames_orignal = []
|
150 |
+
# Read the video and first x frames
|
151 |
+
cap = cv2.VideoCapture(video_path)
|
152 |
+
#print("fps",cap.get(cv2.CAP_PROP_FPS))
|
153 |
+
old_frames = []
|
154 |
+
for i in range(num_frames_prior):
|
155 |
+
ret, old_frame = cap.read()
|
156 |
+
# create HSV & make Value a constant
|
157 |
+
hsv = np.zeros_like(old_frame)
|
158 |
+
hsv[..., 1] = 255
|
159 |
+
if to_gray:
|
160 |
+
old_frame = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
|
161 |
+
old_frames.append(old_frame)
|
162 |
+
|
163 |
+
#to compute optical flow from the median background
|
164 |
+
if median == True:
|
165 |
+
old_frame = median_frame
|
166 |
+
old_frame = cv2.cvtColor(old_frame, cv2.COLOR_GRAY2BGR)
|
167 |
+
|
168 |
+
|
169 |
+
|
170 |
+
|
171 |
+
while True:
|
172 |
+
# Read the next frame
|
173 |
+
ret, new_frame = cap.read()
|
174 |
+
frame_copy = new_frame
|
175 |
+
if not ret:
|
176 |
+
break
|
177 |
+
|
178 |
+
old_frame = old_frames[0]
|
179 |
+
if median == True:
|
180 |
+
old_frame = median_frame
|
181 |
+
|
182 |
+
# Preprocessing for exact method
|
183 |
+
if to_gray:
|
184 |
+
new_frame = cv2.cvtColor(new_frame, cv2.COLOR_BGR2GRAY)
|
185 |
+
|
186 |
+
|
187 |
+
# Calculate Optical Flow
|
188 |
+
|
189 |
+
flow = method(old_frame, new_frame, None, *params)
|
190 |
+
|
191 |
+
# Encoding: convert the algorithm's output into Polar coordinates
|
192 |
+
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
|
193 |
+
# Use Hue and Value to encode the Optical Flow
|
194 |
+
hsv[..., 0] = ang * 180 / np.pi / 2
|
195 |
+
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
|
196 |
+
|
197 |
+
# Convert HSV image into BGR for demo
|
198 |
+
bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
|
199 |
+
#plt.imshow(frame_copy)
|
200 |
+
#plt.imshow(bgr)
|
201 |
+
# plt.imshow(v,cmap='gray', vmin=0, vmax=255)
|
202 |
+
# plt.show()
|
203 |
+
|
204 |
+
# Update the previous frame
|
205 |
+
old_frames.append(new_frame)
|
206 |
+
old_frames.pop(0)
|
207 |
+
frames_orignal.append(frame_copy)
|
208 |
+
frames_optical_flow.append(bgr)
|
209 |
+
return frames_orignal, frames_optical_flow
|
210 |
+
|
211 |
+
|
212 |
+
|
213 |
+
def get_background_diff_feature(image_path, background):
|
214 |
+
image = PIL.Image.open(image_path).convert('L')
|
215 |
+
L = image.getchannel(0)
|
216 |
+
background_img = PIL.Image.fromarray(background)
|
217 |
+
background_img.getbands()
|
218 |
+
L1 = background_img.getchannel(0)
|
219 |
+
diff = PIL.ImageChops.difference(L, L1)
|
220 |
+
return diff
|
221 |
+
|
222 |
+
def get_prev_frame_diff_feature(image_path, prev_image_path):
|
223 |
+
image = PIL.Image.open(image_path).convert('L')
|
224 |
+
L = image.getchannel(0)
|
225 |
+
image = PIL.Image.open(prev_image_path).convert('L')
|
226 |
+
L1 = image.getchannel(0)
|
227 |
+
diff = PIL.ImageChops.difference(L, L1)
|
228 |
+
return diff
|
229 |
+
|
230 |
+
def create_feature_image(image_path, background, prev_image_path, prev_image_path2=None):
|
231 |
+
prev_image_diff = get_prev_frame_diff_feature(image_path, prev_image_path)
|
232 |
+
image = PIL.Image.open(image_path).convert('L')
|
233 |
+
L = image.getchannel(0)
|
234 |
+
if prev_image_path2 is None:
|
235 |
+
background_diff = get_background_diff_feature(image_path, background)
|
236 |
+
newImagediff = PIL.Image.merge("RGB", [L, background_diff, prev_image_diff])
|
237 |
+
if prev_image_path2 is not None:
|
238 |
+
prev_image_diff2 = get_prev_frame_diff_feature(image_path, prev_image_path2)
|
239 |
+
newImagediff = PIL.Image.merge("RGB", [L, prev_image_diff2, prev_image_diff])
|
240 |
+
|
241 |
+
return newImagediff
|
242 |
+
|
243 |
+
|
244 |
+
|
245 |
+
def create_feature_image_optical_flow(frame, optical_flow, pure=False, background=None, optical_flow2=None, final_channel=False):
|
246 |
+
frame = Image.fromarray(frame).convert('L')
|
247 |
+
L = frame.getchannel(0)
|
248 |
+
flow = PIL.Image.fromarray(optical_flow)
|
249 |
+
#flow.save("flow.png")
|
250 |
+
hsv_optical_flow = cv2.cvtColor(optical_flow, cv2.COLOR_BGR2HSV)
|
251 |
+
h, s, v = cv2.split(hsv_optical_flow)
|
252 |
+
flow = PIL.Image.fromarray(v)
|
253 |
+
#flow.save("flow_hsv.png")
|
254 |
+
|
255 |
+
#print(v.shape)
|
256 |
+
last_channel = np.zeros([v.shape[0], v.shape[1]],dtype=np.uint8)
|
257 |
+
last_channel[:] = 255
|
258 |
+
v = Image.fromarray(v).convert('L')
|
259 |
+
v = v.getchannel(0)
|
260 |
+
|
261 |
+
|
262 |
+
#print(last_channel)
|
263 |
+
last_channel = Image.fromarray(last_channel).convert('L')
|
264 |
+
last_channel = last_channel.getchannel(0)
|
265 |
+
|
266 |
+
gray_optical_flow = Image.fromarray(cv2.cvtColor(optical_flow, cv2.COLOR_BGR2GRAY)).convert('L').getchannel(0)
|
267 |
+
|
268 |
+
feature_image = PIL.Image.merge("RGB", [L, v, last_channel])
|
269 |
+
feature_image = PIL.Image.merge("RGB", [L, gray_optical_flow, last_channel])
|
270 |
+
|
271 |
+
if pure == False:
|
272 |
+
if background is None:
|
273 |
+
gray_optical_flow2 = Image.fromarray(cv2.cvtColor(optical_flow2, cv2.COLOR_BGR2GRAY)).convert('L').getchannel(0)
|
274 |
+
feature_image = PIL.Image.merge("RGB", [L, gray_optical_flow, gray_optical_flow2])
|
275 |
+
#print(here)
|
276 |
+
else:
|
277 |
+
if final_channel == False:
|
278 |
+
background_img = PIL.Image.fromarray(background)
|
279 |
+
L1 = background_img.getchannel(0)
|
280 |
+
feature_image = PIL.Image.merge("RGB", [L, v, L1]) #-->org code
|
281 |
+
# diff = PIL.ImageChops.difference(L, L1)
|
282 |
+
# feature_image = PIL.Image.merge("RGB", [L, v, diff]) #-->up code
|
283 |
+
#print("here adding background")
|
284 |
+
#print("background")
|
285 |
+
if final_channel==True:
|
286 |
+
#print("final channel tru, just adding diff")
|
287 |
+
feature_image = PIL.Image.merge("RGB", [L, v, background])
|
288 |
+
|
289 |
+
|
290 |
+
|
291 |
+
# print(feature_image.size)
|
292 |
+
# print(feature_image.mode)
|
293 |
+
|
294 |
+
return feature_image
|
295 |
+
|
296 |
+
|
297 |
+
|
298 |
+
def save_data(image_dest, count, feature_image, frame_num, data_dir, video, minivideo, count_test, count_train, train):
|
299 |
+
#save image feature
|
300 |
+
feature_image.save(image_dest +"images_feature/"+str(count)+".tif")
|
301 |
+
|
302 |
+
text_file = str(frame_num) +".txt"
|
303 |
+
annotations_easy_source = os.path.join(data_dir, video, minivideo, "annotations_easy",
|
304 |
+
text_file)
|
305 |
+
annotations_easy_hard_source = os.path.join(data_dir, video, minivideo,
|
306 |
+
"annotations_easy_hard", text_file)
|
307 |
+
|
308 |
+
annotations_very_hard_source = os.path.join(data_dir, video, minivideo, "annotations_veryhard",
|
309 |
+
text_file)
|
310 |
+
annotations_easy_hard_veryhard_source = os.path.join(data_dir, video, minivideo,
|
311 |
+
"annotations_easy_hard_veryhard", text_file)
|
312 |
+
|
313 |
+
|
314 |
+
shutil.copy(annotations_easy_source, image_dest +"annotation_easy/" +str(count)+".txt")
|
315 |
+
shutil.copy(annotations_easy_hard_source, image_dest +"annotation_easy_hard/"
|
316 |
+
+ str(count)+".txt")
|
317 |
+
if os.path.exists(annotations_very_hard_source):
|
318 |
+
shutil.copy(annotations_very_hard_source, image_dest +"annotation_veryhard/" +str(count)+".txt")
|
319 |
+
if os.path.exists(annotations_easy_hard_veryhard_source):
|
320 |
+
shutil.copy(annotations_easy_hard_veryhard_source, image_dest +"annotation_easy_hard_veryhard/"
|
321 |
+
+ str(count)+".txt")
|
322 |
+
#create hard only annotation
|
323 |
+
text_file_easy_hard = open(annotations_easy_hard_source, 'r')
|
324 |
+
xy_coords_easy_hard = text_file_easy_hard.readlines()
|
325 |
+
|
326 |
+
text_file_easy = open(annotations_easy_source, 'r')
|
327 |
+
xy_coords_easy = text_file_easy.readlines()
|
328 |
+
|
329 |
+
xy_coords_hard = [coord for coord in xy_coords_easy_hard if coord not in xy_coords_easy ]
|
330 |
+
text_file_hard = open(image_dest +"annotation_hard/" +str(count)+".txt", 'w')
|
331 |
+
for coord in xy_coords_hard:
|
332 |
+
text_file_hard.write(coord)
|
333 |
+
text_file_hard.close()
|
334 |
+
|
335 |
+
annotations_low_motility_source = os.path.join(data_dir, video, minivideo, "annotations_motility_low",
|
336 |
+
text_file)
|
337 |
+
annotations_high_motility_source = os.path.join(data_dir, video, minivideo, "annotations_motility_high",
|
338 |
+
text_file)
|
339 |
+
annotations_mid_motility_source = os.path.join(data_dir, video, minivideo, "annotations_motility_mid",
|
340 |
+
text_file)
|
341 |
+
annotations_wiggle_motility_source = os.path.join(data_dir, video, minivideo, "annotations_motility_wiggle",
|
342 |
+
text_file)
|
343 |
+
|
344 |
+
annotations_sticking_motile_source = os.path.join(data_dir, video, minivideo, "annotations_sticking_motile",
|
345 |
+
text_file)
|
346 |
+
annotations_sticking_stick_source = os.path.join(data_dir, video, minivideo, "annotations_sticking_stick",
|
347 |
+
text_file)
|
348 |
+
annotations_sticking_non_motile_source = os.path.join(data_dir, video, minivideo, "annotations_sticking_non_motile",
|
349 |
+
text_file)
|
350 |
+
|
351 |
+
shutil.copy(annotations_low_motility_source, image_dest +"annotation_motility_low/" +str(count)+".txt")
|
352 |
+
shutil.copy(annotations_high_motility_source, image_dest +"annotation_motility_high/" +str(count)+".txt")
|
353 |
+
shutil.copy(annotations_mid_motility_source, image_dest +"annotation_motility_mid/" +str(count)+".txt")
|
354 |
+
shutil.copy(annotations_wiggle_motility_source, image_dest +"annotation_motility_wiggle/" +str(count)+".txt")
|
355 |
+
|
356 |
+
shutil.copy(annotations_sticking_stick_source, image_dest +"annotation_sticking_stick/" +str(count)+".txt")
|
357 |
+
shutil.copy(annotations_sticking_motile_source, image_dest +"annotation_sticking_motile/" +str(count)+".txt")
|
358 |
+
shutil.copy(annotations_sticking_non_motile_source, image_dest +"annotation_sticking_non_motile/" +str(count)+".txt")
|
359 |
+
|
360 |
+
|
361 |
+
if train == True:
|
362 |
+
count_train += 1
|
363 |
+
else:
|
364 |
+
count_test += 1
|
365 |
+
|
366 |
+
return count_test, count_train
|
367 |
+
|
368 |
+
|
369 |
+
|
370 |
+
#get background frame for every mini video
|
371 |
+
#skip the first frame in every mini video
|
372 |
+
#store image in train images set
|
373 |
+
#store image+background diff + prev image diff in train images feature set
|
374 |
+
#similarly for test
|
375 |
+
|
376 |
+
def create_data(data_dir, dest_dir, trainfolder, train_video, testfolder, test_video, valfolder, val_video, method="background", num_prev=None, mean=False, sample=False, test_only = False, params=None, optical_flow_prior=1, frame_diff_prior=1):
|
377 |
+
os.makedirs(dest_dir, exist_ok=True)
|
378 |
+
data_dir_types = ["/images/", "/images_feature/", "/annotation_easy/", "/annotation_hard/", "/annotation_easy_hard/", "/annotation_easy_hard_veryhard/" , "/annotation_veryhard/" , "/annotation_motility_low", "/annotation_motility_wiggle", "/annotation_motility_mid", "/annotation_motility_high", "/annotation_sticking_stick", "/annotation_sticking_motile", "/annotation_sticking_non_motile"]
|
379 |
+
for video in test_video:
|
380 |
+
for dir_type in data_dir_types:
|
381 |
+
os.makedirs(os.path.join(dest_dir, testfolder, video) + dir_type, exist_ok=True)
|
382 |
+
|
383 |
+
for video in val_video:
|
384 |
+
for dir_type in data_dir_types:
|
385 |
+
if test_only == True:
|
386 |
+
continue
|
387 |
+
os.makedirs(os.path.join(dest_dir, valfolder, video) + dir_type, exist_ok=True)
|
388 |
+
|
389 |
+
for dir_type in data_dir_types:
|
390 |
+
os.makedirs(os.path.join(dest_dir, testfolder) + dir_type, exist_ok=True)
|
391 |
+
if test_only == True:
|
392 |
+
continue
|
393 |
+
os.makedirs(os.path.join(dest_dir, valfolder) + dir_type, exist_ok=True)
|
394 |
+
os.makedirs(dest_dir+trainfolder + dir_type, exist_ok=True)
|
395 |
+
|
396 |
+
count_train = 0
|
397 |
+
count_test = 0
|
398 |
+
count_test_all = 0
|
399 |
+
count_val_all = 0
|
400 |
+
|
401 |
+
for video in natsorted(os.listdir(data_dir)):
|
402 |
+
|
403 |
+
if not video.startswith('.') and os.path.isdir(os.path.join(data_dir,video))==True:
|
404 |
+
if test_only == True and video not in test_video:
|
405 |
+
continue
|
406 |
+
if video not in train_video + test_video + val_video:
|
407 |
+
continue
|
408 |
+
for minivideo in natsorted(os.listdir(os.path.join(data_dir,video))) :
|
409 |
+
if not minivideo.startswith('.') and os.path.isdir(os.path.join(data_dir,video,minivideo)) == True:
|
410 |
+
video_path = os.path.join(data_dir,video) + "/" + minivideo + "video.mp4"
|
411 |
+
print(video_path)
|
412 |
+
|
413 |
+
if method == "background":
|
414 |
+
# get the background model
|
415 |
+
background = get_background(video_path, mean, sample=sample)
|
416 |
+
# convert the background model to grayscale format
|
417 |
+
background = cv2.cvtColor(background, cv2.COLOR_BGR2GRAY)
|
418 |
+
skip_frame_list = [0]
|
419 |
+
if method == "nprev":
|
420 |
+
skip_frame_list = list(range(num_prev))
|
421 |
+
if method in ["diff_from_max_absolute_consecutive_frame_diff", "max_absolute_consecutive_frame_diff"]:
|
422 |
+
max_absolute_consecutive_frame_diff = get_absolute_consec_frame_diff_feature(video_path, max_consec_frame_diff=True)
|
423 |
+
if method in ["max_absolute_all_frame_diff"]:
|
424 |
+
absolute_all_frame_diff = get_absolute_all_frame_diff_feature(video_path, max_feature=True)
|
425 |
+
if method in ["min_absolute_all_frame_diff"]:
|
426 |
+
absolute_all_frame_diff = get_absolute_all_frame_diff_feature(video_path, max_feature=False)
|
427 |
+
|
428 |
+
|
429 |
+
|
430 |
+
if video not in (test_video + val_video):
|
431 |
+
#print(video)
|
432 |
+
train=True
|
433 |
+
else:
|
434 |
+
train=False
|
435 |
+
count_test = 0
|
436 |
+
if video in test_video:
|
437 |
+
testfolder_video = os.path.join(testfolder, video)
|
438 |
+
else:
|
439 |
+
testfolder_video = os.path.join(valfolder, video)
|
440 |
+
|
441 |
+
|
442 |
+
if method in ["optical_flow", "optical_flow_median_back", "optical_flow_from_median_frame",
|
443 |
+
"optical_flow_combined", "diff_from_max_absolute_consecutive_frame_diff",
|
444 |
+
"max_absolute_consecutive_frame_diff", "min_absolute_all_frame_diff",
|
445 |
+
"max_absolute_all_frame_diff"]:
|
446 |
+
#print(method)
|
447 |
+
method_flow = cv2.calcOpticalFlowFarneback
|
448 |
+
#params = [0.5, 3, 15, 3, 5, 1.2, 0] # default Farneback's algorithm parameters
|
449 |
+
# params = [0.5, 4, 18, 3, 5, 1.2, 0]
|
450 |
+
#params = [0.5, 2, 18, 3, 5, 1.2, 0]
|
451 |
+
#params = [0.5, 2, 20, 3, 5, 1.2, 0]
|
452 |
+
|
453 |
+
background = get_background(video_path, mean, sample=sample)
|
454 |
+
# convert the background model to grayscale format
|
455 |
+
background = cv2.cvtColor(background, cv2.COLOR_BGR2GRAY)
|
456 |
+
|
457 |
+
if method == "optical_flow_from_median_frame":
|
458 |
+
frames_org, frames_optical_flow = gen_dense_optical_flow_data(method_flow, video_path, params, to_gray=True, median=True, median_frame=background)
|
459 |
+
else :
|
460 |
+
frames_org, frames_optical_flow = gen_dense_optical_flow_data(method_flow, video_path, params, to_gray=True,num_frames_prior=optical_flow_prior)
|
461 |
+
|
462 |
+
if method == "optical_flow_combined":
|
463 |
+
m_frames_org, m_frames_optical_flow = gen_dense_optical_flow_data(method_flow, video_path, params, to_gray=True, median=True, median_frame=background)
|
464 |
+
c_frames_org, c_frames_optical_flow = gen_dense_optical_flow_data(method_flow, video_path, params, to_gray=True)
|
465 |
+
# print(len(frames_org))
|
466 |
+
# print(frames_org[0].shape)
|
467 |
+
# print(len(frames_optical_flow))
|
468 |
+
# print(frames_optical_flow[0].shape)
|
469 |
+
|
470 |
+
if method in ["optical_flow", "optical_flow_from_median_frame"]:
|
471 |
+
pure = True
|
472 |
+
else:
|
473 |
+
pure = False
|
474 |
+
|
475 |
+
|
476 |
+
print(len(frames_org))
|
477 |
+
for i, frame in enumerate(frames_org):
|
478 |
+
#save frame in images
|
479 |
+
if train==True:
|
480 |
+
image_dest = dest_dir + "/" +trainfolder +"/"
|
481 |
+
count = count_train
|
482 |
+
else:
|
483 |
+
#print(video)
|
484 |
+
image_dest = dest_dir +"/" + testfolder_video + "/"
|
485 |
+
count = count_test
|
486 |
+
|
487 |
+
#print(frame.shape)
|
488 |
+
img = Image.fromarray(frame, "RGB")
|
489 |
+
img.save(image_dest +"images/"+str(count)+".tif")
|
490 |
+
if method == "optical_flow_combined":
|
491 |
+
feature_image = create_feature_image_optical_flow(frame, c_frames_optical_flow[i], pure=pure, optical_flow2=m_frames_optical_flow[i])
|
492 |
+
elif method == "diff_from_max_absolute_consecutive_frame_diff":
|
493 |
+
diff_from_max_absolute_consecutive_frame_diff = get_diff_from_absolute_consec_frame_diff_feature(frame_diff_feature=max_absolute_consecutive_frame_diff, image_path=frame, frame=True)
|
494 |
+
feature_image = create_feature_image_optical_flow(frame, frames_optical_flow[i], pure, diff_from_max_absolute_consecutive_frame_diff, final_channel=True)
|
495 |
+
elif method == "max_absolute_consecutive_frame_diff":
|
496 |
+
feature_image = create_feature_image_optical_flow(frame, frames_optical_flow[i], pure, max_absolute_consecutive_frame_diff, final_channel=False)
|
497 |
+
elif method in ["max_absolute_all_frame_diff", "min_absolute_all_frame_diff"]:
|
498 |
+
feature_image = create_feature_image_optical_flow(frame, frames_optical_flow[i], pure, absolute_all_frame_diff[i+1], final_channel=False)
|
499 |
+
else:
|
500 |
+
feature_image = create_feature_image_optical_flow(frame, frames_optical_flow[i], pure, background)
|
501 |
+
frame_num = i+1
|
502 |
+
|
503 |
+
count_test, count_train = save_data(image_dest, count, feature_image, frame_num, data_dir, video, minivideo, count_test,
|
504 |
+
count_train, train)
|
505 |
+
if train == False:
|
506 |
+
if video in test_video:
|
507 |
+
count = count_test_all
|
508 |
+
image_dest_all = dest_dir +"/" + testfolder + "/"
|
509 |
+
count_test_all, count_train = save_data(image_dest_all, count, feature_image, frame_num, data_dir, video, minivideo, count_test_all,
|
510 |
+
count_train, train)
|
511 |
+
else:
|
512 |
+
count = count_val_all
|
513 |
+
image_dest_all = dest_dir +"/" + valfolder + "/"
|
514 |
+
count_val_all, count_train = save_data(image_dest_all, count, feature_image, frame_num, data_dir, video, minivideo, count_val_all,
|
515 |
+
count_train, train)
|
516 |
+
img.save(image_dest_all +"images/"+str(count)+".tif")
|
517 |
+
|
518 |
+
|
519 |
+
else:
|
520 |
+
# print("in else")
|
521 |
+
# print(method)
|
522 |
+
for frame in natsorted(os.listdir(os.path.join(data_dir, video, minivideo, "images"))):
|
523 |
+
frame_num = int(frame.split(".tif")[0])
|
524 |
+
#skip first frame
|
525 |
+
if frame_num not in skip_frame_list:
|
526 |
+
#save frame in images
|
527 |
+
images_source = os.path.join(data_dir, video, minivideo, "images", frame)
|
528 |
+
if train==True:
|
529 |
+
image_dest = dest_dir + "/" +trainfolder +"/"
|
530 |
+
count = count_train
|
531 |
+
#print(count)
|
532 |
+
else:
|
533 |
+
#print(video)
|
534 |
+
image_dest = dest_dir +"/" + testfolder_video + "/"
|
535 |
+
image_dest_all = dest_dir +"/" + testfolder + "/"
|
536 |
+
count = count_test
|
537 |
+
|
538 |
+
shutil.copy(images_source, image_dest +"images/"+str(count)+".tif")
|
539 |
+
|
540 |
+
#create new image
|
541 |
+
prev_frame = str(frame_num-1) +".tif"
|
542 |
+
prev_image = os.path.join(data_dir, video, minivideo, "images", prev_frame)
|
543 |
+
if method == "background":
|
544 |
+
feature_image = create_feature_image(image_path=images_source, background=background,prev_image_path=prev_image)
|
545 |
+
if method == "nprev":
|
546 |
+
background=None
|
547 |
+
prev_frame2 = str(frame_num-num_prev) +".tif"
|
548 |
+
prev_image2 = os.path.join(data_dir, video, minivideo, "images", prev_frame2)
|
549 |
+
feature_image = create_feature_image(image_path=images_source, background=background,
|
550 |
+
prev_image_path=prev_image, prev_image_path2=prev_image2)
|
551 |
+
|
552 |
+
|
553 |
+
|
554 |
+
count_test, count_train = save_data(image_dest, count, feature_image, frame_num, data_dir, video, minivideo, count_test, count_train, train)
|
555 |
+
if train == False:
|
556 |
+
if video in test_video:
|
557 |
+
count = count_test_all
|
558 |
+
image_dest_all = dest_dir +"/" + testfolder + "/"
|
559 |
+
count_test_all, count_train = save_data(image_dest_all, count, feature_image, frame_num, data_dir, video, minivideo, count_test_all,
|
560 |
+
count_train, train)
|
561 |
+
else:
|
562 |
+
count = count_val_all
|
563 |
+
image_dest_all = dest_dir +"/" + valfolder + "/"
|
564 |
+
count_val_all, count_train = save_data(image_dest_all, count, feature_image, frame_num, data_dir, video, minivideo, count_val_all,
|
565 |
+
count_train, train)
|
566 |
+
shutil.copy(images_source, image_dest_all +"images/"+str(count)+".tif")
|
567 |
+
|
568 |
+
|
569 |
+
|
570 |
+
def create_train_data(target_data_sub_dir, dest_sub_dir, exp_name, train_video, test_video, val_video, feature_method="optical_flow_median_back"):
|
571 |
+
feature_dir = "data_feature_optical_flow_median_back_2pyr_18win_background_img/"
|
572 |
+
data_dir = target_data_sub_dir
|
573 |
+
dest_dir = os.path.join(dest_sub_dir, exp_name, feature_dir)
|
574 |
+
trainfolder = "train"
|
575 |
+
testfolder = "test"
|
576 |
+
valfolder = "val"
|
577 |
+
params = [0.5, 2, 18, 3, 5, 1.2, 0]
|
578 |
+
|
579 |
+
create_data(data_dir, dest_dir, trainfolder, train_video, testfolder, test_video, valfolder, val_video,
|
580 |
+
method=feature_method, params=params)
|
581 |
+
|
582 |
+
|
583 |
+
|
584 |
+
def create_test_data(target_data_sub_dir, dest_sub_dir, exp_name, test_video_list, feature_method="optical_flow_median_back"):
|
585 |
+
params = [0.5, 2, 18, 3, 5, 1.2, 0]
|
586 |
+
for video in test_video_list:
|
587 |
+
print(video)
|
588 |
+
data_dir = target_data_sub_dir
|
589 |
+
dest_dir = os.path.join(dest_sub_dir, exp_name , f"data_{video}_feature_optical_flow_median_back_2pyr_18win_background_img/")
|
590 |
+
trainfolder = "train"
|
591 |
+
testfolder = "test"
|
592 |
+
valfolder = "val"
|
593 |
+
val_video = []
|
594 |
+
train_video = []
|
595 |
+
test_video = [f"{video}"]
|
596 |
+
params = [0.5, 2, 18, 3, 5, 1.2, 0]
|
597 |
+
|
598 |
+
create_data(data_dir, dest_dir, trainfolder, train_video, testfolder, test_video, valfolder, val_video,
|
599 |
+
method=feature_method, test_only = True, params=params)
|
600 |
+
|
601 |
+
|
602 |
+
|
603 |
+
|
604 |
+
if __name__ == "__main__":
|
605 |
+
|
606 |
+
parser = argparse.ArgumentParser(description="Feature Preparation")
|
607 |
+
parser.add_argument("--target_data_sub_dir", default = "MEMTrack/data/collagen/", help="Path to the folder to process")
|
608 |
+
parser.add_argument("--dest_sub_dir", default ="MEMTrack/DataFeatures/", help="Path to the folder to process")
|
609 |
+
parser.add_argument("--exp_name", default ="collagen_motility_inference", help="Path to the folder to process")
|
610 |
+
parser.add_argument("--feature_method", default="optical_flow_median_back", help="Path to the folder to process")
|
611 |
+
# optical flow median back with optical_flow_prior computes optical flow from the xth previous frame
|
612 |
+
# "diff_from_max_absolute_consecutive_frame_diff" creates a feature for diff from the "max consecutive frame diff" feature, with a frame diff prior for xth frame diff
|
613 |
+
parser.add_argument('--train_video', type=str, nargs='+', help='a list of strings', default=[])
|
614 |
+
parser.add_argument('--val_video',type=str, nargs='+', help='a list of strings', default=[])
|
615 |
+
parser.add_argument('--test_video', type=str, nargs='+', help='a list of strings', default=[])
|
616 |
+
|
617 |
+
args = parser.parse_args()
|
618 |
+
target_data_sub_dir = args.target_data_sub_dir
|
619 |
+
dest_sub_dir = args.dest_sub_dir
|
620 |
+
exp_name = args.exp_name
|
621 |
+
feature_method = args.feature_method
|
622 |
+
train_video = args.train_video
|
623 |
+
test_video = args.test_video
|
624 |
+
val_video = args.val_video
|
625 |
+
|
626 |
+
create_train_data(target_data_sub_dir, dest_sub_dir, exp_name, train_video, test_video, val_video, feature_method)
|
627 |
+
create_test_data(target_data_sub_dir, dest_sub_dir, exp_name, test_video, feature_method)
|
628 |
+
|
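A minimal usage sketch of the feature-generation helper defined above, for reference; the video folder name is a placeholder and the directories are the argparse defaults shown above:

    from data_feature_gen import create_test_data

    # Generate optical-flow feature images for a single unlabeled video folder
    # (expects MEMTrack/data/collagen/video19/frame1/images/ to exist -- placeholder name).
    create_test_data(
        target_data_sub_dir="MEMTrack/data/collagen/",
        dest_sub_dir="MEMTrack/DataFeatures/",
        exp_name="collagen_motility_inference",
        test_video_list=["video19"],            # placeholder video folder name
        feature_method="optical_flow_median_back",
    )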
MEMTrack/src/data_prep_utils.py
ADDED
@@ -0,0 +1,531 @@
|
1 |
+
import zipfile
|
2 |
+
import openpyxl
|
3 |
+
import os
|
4 |
+
import csv
|
5 |
+
import sys
|
6 |
+
import cv2
|
7 |
+
import shutil
|
8 |
+
import PIL
|
9 |
+
import glob
|
10 |
+
import pandas as pd
|
11 |
+
import numpy as np
|
12 |
+
from natsort import natsorted
|
13 |
+
from PIL import Image
|
14 |
+
import argparse
|
15 |
+
|
16 |
+
def load_map(video_map_path):
|
17 |
+
# Initialize an empty dictionary
|
18 |
+
video_dict = {}
|
19 |
+
|
20 |
+
# Open and read the text file
|
21 |
+
with open(video_map_path, 'r') as file:
|
22 |
+
next(file)
|
23 |
+
for line in file:
|
24 |
+
#print(line)
|
25 |
+
# Split each line into ID and name using the comma as a separator
|
26 |
+
video_id, video_name = line.strip().split(',')
|
27 |
+
|
28 |
+
# Convert the ID to an integer if needed
|
29 |
+
video_id = int(video_id)
|
30 |
+
|
31 |
+
# Add the key-value pair to the dictionary
|
32 |
+
video_dict[video_id] = video_name
|
33 |
+
|
34 |
+
# Print the loaded dictionary
|
35 |
+
# print(video_dict)
|
36 |
+
return video_dict
|
37 |
+
|
38 |
+
#find video number from video map
|
39 |
+
def find_next_video_num(data_dir):
|
40 |
+
#data_dir = "../Data/"
|
41 |
+
data_sub_dirs = glob.glob(f'{data_dir}/*')
|
42 |
+
print("data dirs: ", data_sub_dirs)
|
43 |
+
video_dirs = glob.glob(f'{data_dir}/*/*')
|
44 |
+
max_id = 0
|
45 |
+
for video in video_dirs:
|
46 |
+
#print(video)
|
47 |
+
if "ipynb" not in video:
|
48 |
+
video_id = int(video.split("video")[1])
|
49 |
+
#print(video_id)
|
50 |
+
max_id = max(video_id, max_id)
|
51 |
+
print("last video num: ",max_id)
|
52 |
+
return max_id +1
|
53 |
+
|
54 |
+
def video_in_videomap(video_map_path, folder):
|
55 |
+
# load videomap
|
56 |
+
video_list = [value.strip() for value in load_map(video_map_path).values()]
|
57 |
+
video_list = [value.lstrip("_") for value in video_list]
|
58 |
+
|
59 |
+
if os.path.basename(folder.split(".zip")[0]) in video_list:
|
60 |
+
return True
|
61 |
+
|
62 |
+
def add_video_to_videomap(video_map_path, video_dir, final_data_dir):
|
63 |
+
video_num = find_next_video_num(final_data_dir)
|
64 |
+
print("curr video num: ",video_num)
|
65 |
+
|
66 |
+
if not os.path.exists(video_map_path):
|
67 |
+
video_map = open(video_map_path,'a+')
|
68 |
+
video_map.write("video_num, strain")
|
69 |
+
video_map.close()
|
70 |
+
|
71 |
+
video_map = open(video_map_path,'a+')
|
72 |
+
video_strain = os.path.basename(video_dir.strip().strip("/"))
|
73 |
+
video_map.write("\n")
|
74 |
+
video_map.write(f"{video_num}, {video_strain}")
|
75 |
+
video_map.close()
|
76 |
+
|
77 |
+
print("Added to videomap")
|
78 |
+
return video_num
|
79 |
+
|
80 |
+
|
81 |
+
def create_annotations(video_dir, csv_file_name, inference_mode, video_num, target_data_sub_dir ):
|
82 |
+
count = 0
|
83 |
+
images_source = os.path.join(video_dir, "Images without Labels")
|
84 |
+
files = natsorted(os.listdir(images_source))
|
85 |
+
# files[:5]
|
86 |
+
data_dir = video_dir
|
87 |
+
all_files = os.listdir(data_dir)
|
88 |
+
if not inference_mode:
|
89 |
+
try:
|
90 |
+
#csv_file_name = list(filter(lambda f: f.endswith('Tracked.csv'), all_files))[0] if list(filter(lambda f: f.endswith('.csv'), all_files)) else list(filter(lambda f: f.endswith('.xlsx'), all_files))[0]
|
91 |
+
#csv_file_name = "Raw Data.csv"
|
92 |
+
csv_file = os.path.join(data_dir, csv_file_name)
|
93 |
+
# print(csv_file_name)
|
94 |
+
if ".csv" not in csv_file_name:
|
95 |
+
read_file = pd.read_excel(csv_file, engine='openpyxl')
|
96 |
+
read_file.to_csv (csv_file.split("xlsx")[0]+".csv",
|
97 |
+
index = None, header=True)
|
98 |
+
csv_file = csv_file.split("xlsx")[0]+".csv"
|
99 |
+
# print(csv_file)
|
100 |
+
file = open(csv_file)
|
101 |
+
except:
|
102 |
+
print("No Raw Data present, assuming Inference mode only")
|
103 |
+
inference_mode=True
|
104 |
+
|
105 |
+
if not inference_mode:
|
106 |
+
csvreader = csv.reader(file)
|
107 |
+
header = []
|
108 |
+
header = next(csvreader)
|
109 |
+
# header = header[0].split("\t")
|
110 |
+
print(header)
|
111 |
+
header = [x.lower() for x in header]
|
112 |
+
img_id = header.index('pid') # header.index('t [Frame]')
|
113 |
+
x_id = header.index('x [pixel]')
|
114 |
+
y_id = header.index('y [pixel]')
|
115 |
+
try:
|
116 |
+
sticking_id = header.index('sticking')
|
117 |
+
subpop_id = header.index('subpopulation')
|
118 |
+
except:
|
119 |
+
sticking_id = None
|
120 |
+
subpop_id = None
|
121 |
+
try:
|
122 |
+
diff_id = header.index('e/h')
|
123 |
+
|
124 |
+
except:
|
125 |
+
diff_id = None
|
126 |
+
print("No diff id found, assuming all easy")
|
127 |
+
# print(img_id)
|
128 |
+
# print(x_id)
|
129 |
+
# print(y_id)
|
130 |
+
# print(sticking_id)
|
131 |
+
# print(subpop_id)
|
132 |
+
|
133 |
+
rows = []
|
134 |
+
for row in csvreader:
|
135 |
+
rows.append(row)
|
136 |
+
#rows
|
137 |
+
file.close()
|
138 |
+
|
139 |
+
dest_dir = os.path.join(target_data_sub_dir, f"video{video_num}/")
|
140 |
+
# print("dest_dir: ", dest_dir)
|
141 |
+
images_target =os.path.join(dest_dir, "frame1/images/")
|
142 |
+
annotations_easy = os.path.join(dest_dir, "frame1/annotations_easy/")
|
143 |
+
annotations_easy_hard = os.path.join(dest_dir, "frame1/annotations_easy_hard/")
|
144 |
+
annotations_hard = os.path.join(dest_dir, "frame1/annotations_hard/")
|
145 |
+
annotations_very_hard = os.path.join(dest_dir, "frame1/annotations_veryhard/")
|
146 |
+
annotations_easy_hard_veryhard = os.path.join(dest_dir, "frame1/annotations_easy_hard_veryhard/")
|
147 |
+
|
148 |
+
annotations_motility_high = os.path.join(dest_dir, "frame1/annotations_motility_high/")
|
149 |
+
annotations_motility_low = os.path.join(dest_dir, "frame1/annotations_motility_low/")
|
150 |
+
annotations_motility_wiggle = os.path.join(dest_dir, "frame1/annotations_motility_wiggle/")
|
151 |
+
annotations_motility_mid = os.path.join(dest_dir, "frame1/annotations_motility_mid/")
|
152 |
+
|
153 |
+
annotations_sticking_motile = os.path.join(dest_dir, "frame1/annotations_sticking_motile/")
|
154 |
+
annotations_sticking_non_motile = os.path.join(dest_dir, "frame1/annotations_sticking_non_motile/")
|
155 |
+
annotations_sticking_stick = os.path.join(dest_dir, "frame1/annotations_sticking_stick/")
|
156 |
+
|
157 |
+
|
158 |
+
os.makedirs(images_target, exist_ok=True)
|
159 |
+
os.makedirs(annotations_easy_hard, exist_ok=True)
|
160 |
+
os.makedirs(annotations_easy_hard_veryhard, exist_ok=True)
|
161 |
+
os.makedirs(annotations_hard, exist_ok=True)
|
162 |
+
os.makedirs(annotations_easy, exist_ok=True)
|
163 |
+
os.makedirs(annotations_very_hard, exist_ok=True)
|
164 |
+
|
165 |
+
|
166 |
+
os.makedirs(annotations_sticking_stick, exist_ok=True)
|
167 |
+
os.makedirs(annotations_sticking_motile, exist_ok=True)
|
168 |
+
os.makedirs(annotations_sticking_non_motile, exist_ok=True)
|
169 |
+
|
170 |
+
os.makedirs(annotations_motility_high, exist_ok=True)
|
171 |
+
os.makedirs(annotations_motility_wiggle, exist_ok=True)
|
172 |
+
os.makedirs(annotations_motility_mid, exist_ok=True)
|
173 |
+
os.makedirs(annotations_motility_low, exist_ok=True)
|
174 |
+
|
175 |
+
for i,image in enumerate(files):
|
176 |
+
#copy and rename images
|
177 |
+
image_name = str(count) +".tif"
|
178 |
+
shutil.copy(os.path.join(images_source, image), os.path.join(images_target,image_name))
|
179 |
+
# print(os.path.join(images_source, image))
|
180 |
+
# print(os.path.join(images_target,image_name))
|
181 |
+
#create annotations txt file
|
182 |
+
#image_id = int(files_track[i].split(")")[1].split(".tif")[0])
|
183 |
+
txt_file = open(annotations_easy_hard + str(count) +".txt",'w')
|
184 |
+
txt_file_easy = open(annotations_easy + str(count) +".txt",'w')
|
185 |
+
txt_file_hard = open(annotations_hard + str(count) +".txt",'w')
|
186 |
+
txt_file_very_hard = open(annotations_very_hard + str(count) +".txt",'w')
|
187 |
+
txt_file_all = open(annotations_easy_hard_veryhard + str(count) +".txt",'w')
|
188 |
+
|
189 |
+
txt_file_motility_high = open(annotations_motility_high + str(count) +".txt",'w')
|
190 |
+
txt_file_motility_low = open(annotations_motility_low + str(count) +".txt",'w')
|
191 |
+
txt_file_motility_wiggle = open(annotations_motility_wiggle + str(count) +".txt",'w')
|
192 |
+
txt_file_motility_mid = open(annotations_motility_mid + str(count) +".txt",'w')
|
193 |
+
|
194 |
+
txt_file_sticking_stick = open(annotations_sticking_stick + str(count) +".txt",'w')
|
195 |
+
txt_file_sticking_motile = open(annotations_sticking_motile + str(count) +".txt",'w')
|
196 |
+
txt_file_sticking_non_motile = open(annotations_sticking_non_motile + str(count) +".txt",'w')
|
197 |
+
|
198 |
+
if not inference_mode:
|
199 |
+
for row in rows:
|
200 |
+
# print(image_id)
|
201 |
+
# print( row[img_id])
|
202 |
+
##print(row)
|
203 |
+
if int(row[img_id])-1 == int(count):#PID starts from 1
|
204 |
+
# print(image_id)
|
205 |
+
# print( row[img_id])
|
206 |
+
txt_file_all.write(row[x_id])
|
207 |
+
txt_file_all.write(" ")
|
208 |
+
txt_file_all.write(row[y_id])
|
209 |
+
txt_file_all.write("\n")
|
210 |
+
try:
|
211 |
+
if row[diff_id]=="E":
|
212 |
+
txt_file_easy.write(row[x_id])
|
213 |
+
txt_file_easy.write(" ")
|
214 |
+
txt_file_easy.write(row[y_id])
|
215 |
+
txt_file_easy.write("\n")
|
216 |
+
|
217 |
+
txt_file.write(row[x_id])
|
218 |
+
txt_file.write(" ")
|
219 |
+
txt_file.write(row[y_id])
|
220 |
+
txt_file.write("\n")
|
221 |
+
|
222 |
+
if row[diff_id]=="H":
|
223 |
+
txt_file_hard.write(row[x_id])
|
224 |
+
txt_file_hard.write(" ")
|
225 |
+
txt_file_hard.write(row[y_id])
|
226 |
+
txt_file_hard.write("\n")
|
227 |
+
|
228 |
+
txt_file.write(row[x_id])
|
229 |
+
txt_file.write(" ")
|
230 |
+
txt_file.write(row[y_id])
|
231 |
+
txt_file.write("\n")
|
232 |
+
|
233 |
+
if row[diff_id]=="VH":
|
234 |
+
txt_file_very_hard.write(row[x_id])
|
235 |
+
txt_file_very_hard.write(" ")
|
236 |
+
txt_file_very_hard.write(row[y_id])
|
237 |
+
txt_file_very_hard.write("\n")
|
238 |
+
|
239 |
+
if row[subpop_id]=="L":
|
240 |
+
txt_file_motility_low.write(row[x_id])
|
241 |
+
txt_file_motility_low.write(" ")
|
242 |
+
txt_file_motility_low.write(row[y_id])
|
243 |
+
txt_file_motility_low.write("\n")
|
244 |
+
if row[subpop_id]=="W":
|
245 |
+
#print("wiggle")
|
246 |
+
txt_file_motility_wiggle.write(row[x_id])
|
247 |
+
txt_file_motility_wiggle.write(" ")
|
248 |
+
txt_file_motility_wiggle.write(row[y_id])
|
249 |
+
txt_file_motility_wiggle.write("\n")
|
250 |
+
if row[subpop_id]=="M":
|
251 |
+
txt_file_motility_mid.write(row[x_id])
|
252 |
+
txt_file_motility_mid.write(" ")
|
253 |
+
txt_file_motility_mid.write(row[y_id])
|
254 |
+
txt_file_motility_mid.write("\n")
|
255 |
+
if row[subpop_id]=="H":
|
256 |
+
txt_file_motility_high.write(row[x_id])
|
257 |
+
txt_file_motility_high.write(" ")
|
258 |
+
txt_file_motility_high.write(row[y_id])
|
259 |
+
txt_file_motility_high.write("\n")
|
260 |
+
|
261 |
+
if row[sticking_id]=="S":
|
262 |
+
txt_file_sticking_stick.write(row[x_id])
|
263 |
+
txt_file_sticking_stick.write(" ")
|
264 |
+
txt_file_sticking_stick.write(row[y_id])
|
265 |
+
txt_file_sticking_stick.write("\n")
|
266 |
+
if row[sticking_id]=="M":
|
267 |
+
txt_file_sticking_motile.write(row[x_id])
|
268 |
+
txt_file_sticking_motile.write(" ")
|
269 |
+
txt_file_sticking_motile.write(row[y_id])
|
270 |
+
txt_file_sticking_motile.write("\n")
|
271 |
+
if row[sticking_id]=="NM":
|
272 |
+
txt_file_sticking_non_motile.write(row[x_id])
|
273 |
+
txt_file_sticking_non_motile.write(" ")
|
274 |
+
txt_file_sticking_non_motile.write(row[y_id])
|
275 |
+
txt_file_sticking_non_motile.write("\n")
|
276 |
+
except:
|
277 |
+
txt_file_easy.write(row[x_id])
|
278 |
+
txt_file_easy.write(" ")
|
279 |
+
txt_file_easy.write(row[y_id])
|
280 |
+
txt_file_easy.write("\n")
|
281 |
+
|
282 |
+
txt_file.close()
|
283 |
+
count = count+1
|
284 |
+
print("Annotations processed")
|
285 |
+
return inference_mode
|
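For reference, a hedged sketch of the tracking CSV that create_annotations parses when ground truth is available; the column names come from the header.index calls above, while the example rows and values are made up.
# Raw Data.csv (or .xlsx, converted on the fly above)
# pid,x [pixel],y [pixel],e/h,sticking,subpopulation,tid
# 1,103.2,55.7,E,M,H,4      <- frame 1 (pid is 1-based), easy label, motile, high-motility subpopulation
# 2,104.0,56.1,H,S,W,4      <- frame 2, hard label, sticking, wiggle subpopulation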
286 |
+
|
287 |
+
|
288 |
+
|
289 |
+
#run only once to generate bacteria data
|
290 |
+
def generate_bacteria_data(file, video_dir):
|
291 |
+
# Generate Bacteria Tracks specific data for Bacteria Analysis
|
292 |
+
|
293 |
+
csvreader = csv.reader(file)
|
294 |
+
header = []
|
295 |
+
header = next(csvreader)
|
296 |
+
# header = header[0].split("\t")
|
297 |
+
print(header)
|
298 |
+
header = [x.lower() for x in header]
|
299 |
+
img_id = header.index('pid') # header.index('t [Frame]')
|
300 |
+
x_id = header.index('x [pixel]')
|
301 |
+
y_id = header.index('y [pixel]')
|
302 |
+
try:
|
303 |
+
diff_id = header.index('e/h')
|
304 |
+
except:
|
305 |
+
diff_id = None
|
306 |
+
print("No diff id found, assuming all easy")
|
307 |
+
track_id = header.index('tid')
|
308 |
+
print(img_id)
|
309 |
+
print(x_id)
|
310 |
+
print(y_id)
|
311 |
+
print(track_id)
|
312 |
+
|
313 |
+
rows = []
|
314 |
+
for row in csvreader:
|
315 |
+
rows.append(row)
|
316 |
+
print(rows[:6])
|
317 |
+
file.close()
|
318 |
+
|
319 |
+
tid_visited = []
|
320 |
+
#video_dir = "./video9_feature_optical_flow_median_back_2pyr_18win/test/"
|
321 |
+
bacteria_folder = "bacteria"
|
322 |
+
bacteria_easy_hard_state_file = "easy_hard_veryhard"
|
323 |
+
bacteria_coords = "xy_coord"
|
324 |
+
count = 0
|
325 |
+
max_pid = 0
|
326 |
+
prev_tid = 0
|
327 |
+
|
328 |
+
for row in rows:
|
329 |
+
pid = int(row[img_id])-1
|
330 |
+
if max_pid < pid:
|
331 |
+
max_pid = pid
|
332 |
+
|
333 |
+
for row in rows:
|
334 |
+
tid = row[track_id]
|
335 |
+
if tid not in tid_visited:
|
336 |
+
tid_visited.append(tid)
|
337 |
+
|
338 |
+
# if count<(max_pid-1) and count>0:
|
339 |
+
# # print(row)
|
340 |
+
# # print(rows[i+1])
|
341 |
+
# print(count)
|
342 |
+
# print(max_pid)
|
343 |
+
# for i in range((max_pid - count-1)):
|
344 |
+
# txt_file.write(str(count))
|
345 |
+
# txt_file.write(" ")
|
346 |
+
# txt_file.write("NotPresent")
|
347 |
+
# txt_file.write("\n")
|
348 |
+
|
349 |
+
# coord_file = open(os.path.join(video_dir, bacteria_folder, str(prev_tid), bacteria_coords, str(count)) +".txt",'w')
|
350 |
+
# coord_file.close()
|
351 |
+
# count = count+1
|
352 |
+
# txt_file.close()
|
353 |
+
# coord_file.close()
|
354 |
+
count = 0
|
355 |
+
os.makedirs(os.path.join(video_dir, bacteria_folder, str(tid), bacteria_coords), exist_ok=True)
|
356 |
+
#os.makedirs(os.path.join(video_dir, bacteria_folder, str(tid)), exist_ok=True)
|
357 |
+
try:
|
358 |
+
os.remove(os.path.join(video_dir, bacteria_folder, str(tid), bacteria_easy_hard_state_file) +".txt")
|
359 |
+
#os.remove(os.path.join(video_dir, bacteria_folder, str(tid), bacteria_coords, str(count)) +".txt")
|
360 |
+
except OSError:
|
361 |
+
pass
|
362 |
+
|
363 |
+
txt_file = open(os.path.join(video_dir, bacteria_folder, str(tid), bacteria_easy_hard_state_file) +".txt",'a')
|
364 |
+
pid = int(row[img_id]) - 1
|
365 |
+
if int(pid) == 0: #for optical flow since first frame is skipped
|
366 |
+
continue
|
367 |
+
if pid-2>count: # pid-1 because 1 is skipped
|
368 |
+
# print(count)
|
369 |
+
# print(pid)
|
370 |
+
for i in range((pid - count-1)):
|
371 |
+
txt_file.write(str(count))
|
372 |
+
txt_file.write(" ")
|
373 |
+
txt_file.write("NotPresent")
|
374 |
+
txt_file.write("\n")
|
375 |
+
|
376 |
+
coord_file = open(os.path.join(video_dir, bacteria_folder, str(tid), bacteria_coords, str(count)) +".txt",'w')
|
377 |
+
coord_file.close()
|
378 |
+
count = count+1
|
379 |
+
|
380 |
+
txt_file.write(str(count))
|
381 |
+
txt_file.write(" ")
|
382 |
+
try:
|
383 |
+
txt_file.write(row[diff_id])
|
384 |
+
except:
|
385 |
+
txt_file.write("E")
|
386 |
+
#print("No diff id found, assuming all easy")
|
387 |
+
txt_file.write("\n")
|
388 |
+
|
389 |
+
|
390 |
+
|
391 |
+
coord_file = open(os.path.join(video_dir, bacteria_folder, str(tid), bacteria_coords, str(count)) +".txt",'a')
|
392 |
+
coord_file.write(row[x_id])
|
393 |
+
coord_file.write(" ")
|
394 |
+
coord_file.write(row[y_id])
|
395 |
+
coord_file.write("\n")
|
396 |
+
|
397 |
+
|
398 |
+
count = count+1
|
399 |
+
|
400 |
+
if count<(max_pid-1) and count>0:
|
401 |
+
# print(row)
|
402 |
+
# print(rows[i+1])
|
403 |
+
print(count)
|
404 |
+
print(max_pid)
|
405 |
+
for i in range((max_pid - count)):
|
406 |
+
txt_file.write(str(count))
|
407 |
+
txt_file.write(" ")
|
408 |
+
txt_file.write("NotPresent")
|
409 |
+
txt_file.write("\n")
|
410 |
+
|
411 |
+
coord_file = open(os.path.join(video_dir, bacteria_folder, str(tid), bacteria_coords, str(count)) +".txt",'w')
|
412 |
+
coord_file.close()
|
413 |
+
count = count+1
|
414 |
+
txt_file.close()
|
415 |
+
coord_file.close()
|
416 |
+
|
417 |
+
|
418 |
+
def create_video(data_dir):
|
419 |
+
# choose codec according to format needed
|
420 |
+
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
|
421 |
+
#print(data_dir)
|
422 |
+
img_sample = cv2.imread(os.path.join(data_dir,"images/0.tif"))
|
423 |
+
#print(img_sample.shape)
|
424 |
+
height, width, channels = img_sample.shape
|
425 |
+
|
426 |
+
video = cv2.VideoWriter(data_dir + 'video.mp4', fourcc, 1, (width, height))
|
427 |
+
#data_dir = "./Data/video3/"
|
428 |
+
image_dir = os.path.join(data_dir, "images")
|
429 |
+
for frame in natsorted(os.listdir(image_dir)):
|
430 |
+
#print(frame)
|
431 |
+
img = cv2.imread(os.path.join(image_dir, frame))
|
432 |
+
video.write(img)
|
433 |
+
video.release()
|
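Example call (path illustrative): create_video expects an images/ subfolder of .tif frames, as laid out by create_annotations, and writes a 1 fps mp4 next to it.
# create_video("MEMTrack/data/collagen/video1/frame1/")   # produces .../frame1/video.mp4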
434 |
+
|
435 |
+
def get_background(file_path, mean=True, sample=False):
|
436 |
+
cap = cv2.VideoCapture(file_path)
|
437 |
+
#print(cap.read())
|
438 |
+
# use all frames of the video to estimate the background (a random subset could be used instead, see below)
|
439 |
+
#frame_indices = cap.get(cv2.CAP_PROP_FRAME_COUNT) * np.random.uniform(size=20)
|
440 |
+
frame_indices = list(range(0,int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) ))
|
441 |
+
print(len(frame_indices))
|
442 |
+
# we will store the frames in array
|
443 |
+
frames = []
|
444 |
+
for idx in frame_indices:
|
445 |
+
# set the frame id to read that particular frame
|
446 |
+
cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
|
447 |
+
ret, frame = cap.read()
|
448 |
+
#print(ret)
|
449 |
+
frames.append(frame)
|
450 |
+
if mean:
|
451 |
+
# calculate the mean
|
452 |
+
background_frame = np.mean(frames, axis=0).astype(np.uint8)
|
453 |
+
else:
|
454 |
+
# calculate the median
|
455 |
+
background_frame = np.median(frames, axis=0).astype(np.uint8)
|
456 |
+
if sample==True:
|
457 |
+
background_frame = cv2.imread("./Control_2_b0t5306c0x0-660y0-492.tiff")
|
458 |
+
#background_frame = cv2.imread("./RBS 2_1_b0t2791c0x0-660y0-492.tiff")
|
459 |
+
return background_frame
|
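A small usage sketch (video path illustrative): mean=True averages every frame, mean=False takes the per-pixel median, and sample=True instead loads the fixed background image hard-coded above.
# background = get_background("MEMTrack/data/collagen/video1/frame1/video.mp4", mean=False)
# cv2.imwrite("median_background.png", background)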
460 |
+
|
461 |
+
|
462 |
+
def process_data(folder, src, final_data_dir, out_sub_dir, videomap="videomap.txt", csv_file_name="Raw Data.csv", inference_mode=False, unzip=False):
|
463 |
+
|
464 |
+
target_data_sub_dir = os.path.join(final_data_dir, out_sub_dir)
|
465 |
+
print("target_data_sub_dir: ", target_data_sub_dir)
|
466 |
+
os.makedirs(target_data_sub_dir, exist_ok=True)
|
467 |
+
|
468 |
+
video_map_path = os.path.join(final_data_dir, videomap )
|
469 |
+
print("video_map_path: ", video_map_path)
|
470 |
+
|
471 |
+
video_dir = os.path.join(src, folder.split(".zip")[0])
|
472 |
+
|
473 |
+
if unzip:
|
474 |
+
# unzip data
|
475 |
+
zip_data_dir = os.path.join(src, folder)
|
476 |
+
with zipfile.ZipFile(zip_data_dir, 'r') as zip_ref:
|
477 |
+
zip_ref.extractall(src)
|
478 |
+
|
479 |
+
if os.path.exists(video_map_path) and video_in_videomap(video_map_path, folder):
|
480 |
+
print("video alread processed: ", folder.split(".zip")[0])
|
481 |
+
raise Exception("video alread processed")
|
482 |
+
return
|
483 |
+
|
484 |
+
# add video to videomap
|
485 |
+
video_num = add_video_to_videomap(video_map_path, video_dir, final_data_dir)
|
486 |
+
inference_mode = create_annotations(video_dir, csv_file_name, inference_mode, video_num, target_data_sub_dir)
|
487 |
+
|
488 |
+
if not inference_mode:
|
489 |
+
target_video_dir = f"{target_data_sub_dir}/video{video_num}/frame1"
|
490 |
+
csv_file = os.path.join(video_dir, csv_file_name)
|
491 |
+
file = open(csv_file)
|
492 |
+
generate_bacteria_data(file, target_video_dir)
|
493 |
+
file.close()
|
494 |
+
|
495 |
+
data_path = target_data_sub_dir
|
496 |
+
test_video = [f"video{video_num}"]
|
497 |
+
for video in natsorted(test_video):
|
498 |
+
if not video.startswith('.') and os.path.isdir(os.path.join(data_path,video))==True:
|
499 |
+
for minivideo in natsorted(os.listdir(os.path.join(data_path,video))):
|
500 |
+
if not minivideo.startswith('.') and os.path.isdir(os.path.join(data_path,video,minivideo)) == True:
|
501 |
+
#print(minivideo)
|
502 |
+
create_video(os.path.join(data_path,video,minivideo))
|
503 |
+
|
504 |
+
print(f"Video {video_num} processed")
|
505 |
+
return video_num
|
506 |
+
|
507 |
+
|
508 |
+
if __name__ == "__main__":
|
509 |
+
|
510 |
+
parser = argparse.ArgumentParser(description="Process folders.")
|
511 |
+
parser.add_argument("--src_dir", default = "DataAll/Collagen/", help="Path to the folder to process")
|
512 |
+
parser.add_argument("--folder", default ="video1", help="Path to the folder to process")
|
513 |
+
parser.add_argument("--out_dir", default ="MEMTrack/data/", help="Path to the folder to process")
|
514 |
+
parser.add_argument("--out_sub_dir", help="Path to the folder to process")
|
515 |
+
parser.add_argument("--csv_file_name", help="Path to the folder to process", default= "Raw Data.csv")
|
516 |
+
parser.add_argument("--videomap", default="videomap.txt")
|
517 |
+
parser.add_argument("--no_gt", action="store_true")
|
518 |
+
parser.add_argument("--unzip", action="store_true", help="Unzip folder")
|
519 |
+
|
520 |
+
|
521 |
+
args = parser.parse_args()
|
522 |
+
src = args.src_dir
|
523 |
+
final_data_dir = args.out_dir
|
524 |
+
out_sub_dir = args.out_sub_dir
|
525 |
+
videomap = args.videomap
|
526 |
+
csv_file_name = args.csv_file_name
|
527 |
+
inference_mode = args.no_gt
|
528 |
+
folder= args.folder
|
529 |
+
unzip = args.unzip
|
530 |
+
|
531 |
+
process_data(folder, src, final_data_dir, out_sub_dir, videomap, csv_file_name, inference_mode, unzip)
|
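An illustrative invocation of this script; --src_dir, --folder and --out_dir mirror the argparse defaults above, while the --out_sub_dir value and the folder contents ("Images without Labels" plus an optional "Raw Data.csv") are assumptions about a typical raw video directory.
# python MEMTrack/src/data_prep_utils.py --src_dir DataAll/Collagen/ --folder video1 \
#     --out_dir MEMTrack/data/ --out_sub_dir collagen
# add --no_gt when no "Raw Data.csv" annotations exist, or --unzip when the folder is a .zip archive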
MEMTrack/src/evaluation_step_wise_motility.py
ADDED
@@ -0,0 +1,755 @@
1 |
+
import random
|
2 |
+
import os
|
3 |
+
import cv2
|
4 |
+
import json
|
5 |
+
import matplotlib
|
6 |
+
import argparse
|
7 |
+
from matplotlib import image, patches
|
8 |
+
import matplotlib.pyplot as pyplot
|
9 |
+
import seaborn as sns
|
10 |
+
import numpy as np
|
11 |
+
import seaborn as sns
|
12 |
+
import pandas as pd
|
13 |
+
import matplotlib.pyplot as plt
|
14 |
+
|
15 |
+
ap = argparse.ArgumentParser(description='Inference')
|
16 |
+
ap.add_argument('--video_map_path', type=str, metavar='PATH')
|
17 |
+
ap.add_argument('--data_path', type=str, metavar='PATH')
|
18 |
+
ap.add_argument('--video', type=str, metavar='PATH')
|
19 |
+
ap.add_argument('--all_labels_file', type=str, metavar='PATH')
|
20 |
+
args = ap.parse_args()
|
21 |
+
|
22 |
+
def bb_intersection_over_union(boxA, boxB):
|
23 |
+
# determine the (x, y)-coordinates of the intersection rectangle
|
24 |
+
xA = max(boxA[0], boxB[0])
|
25 |
+
yA = max(boxA[1], boxB[1])
|
26 |
+
xB = min(boxA[2], boxB[2])
|
27 |
+
yB = min(boxA[3], boxB[3])
|
28 |
+
|
29 |
+
# compute the area of intersection rectangle
|
30 |
+
interArea = max(xB - xA, 0) * max(yB - yA, 0)
|
31 |
+
if interArea == 0:
|
32 |
+
return 0
|
33 |
+
|
34 |
+
# compute the area of both the prediction and ground-truth
|
35 |
+
# rectangles
|
36 |
+
boxAArea = abs((boxA[2] - boxA[0]) * (boxA[3] - boxA[1]))
|
37 |
+
boxBArea = abs((boxB[2] - boxB[0]) * (boxB[3] - boxB[1]))
|
38 |
+
|
39 |
+
# compute the intersection over union by taking the intersection
|
40 |
+
# area and dividing it by the sum of prediction + ground-truth
|
41 |
+
# areas - the intersection area
|
42 |
+
iou_box = interArea / float(boxAArea + boxBArea - interArea)
|
43 |
+
|
44 |
+
# return the intersection over union value
|
45 |
+
return iou_box
|
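A quick sanity check of the IoU helper (the boxes are arbitrary xyxy examples): the 5x5 overlap over the 175-pixel union gives roughly 0.14.
# boxA = [0, 0, 10, 10]
# boxB = [5, 5, 15, 15]
# bb_intersection_over_union(boxA, boxB)   # 25 / 175 ≈ 0.143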
46 |
+
|
47 |
+
def get_iou_mat_image(detectron_train_file_bbox_list, coco_results_file_bbox_list ):
|
48 |
+
# iou_mat = []
|
49 |
+
# for gt_box in detectron_train_file_bbox_list:
|
50 |
+
# iou_row = []
|
51 |
+
# for pred_box in coco_results_file_bbox_list:
|
52 |
+
# gt_box_xyxy = [gt_box[0], gt_box[1], gt_box[0] + gt_box[2], gt_box[1] + gt_box[3]]
|
53 |
+
# pred_box_xyxy = [pred_box[0], pred_box[1], pred_box[0] + pred_box[2], pred_box[1] + pred_box[3]]
|
54 |
+
# # print(pred_box_xyxy)
|
55 |
+
# iou_boxes = bb_intersection_over_union(gt_box_xyxy, pred_box_xyxy)
|
56 |
+
# iou_row.append(iou_boxes)
|
57 |
+
# iou_mat.append(iou_row)
|
58 |
+
iou_mat = np.zeros((len(detectron_train_file_bbox_list), len(coco_results_file_bbox_list)))
|
59 |
+
for i, gt_box in enumerate(detectron_train_file_bbox_list):
|
60 |
+
for j, pred_box in enumerate(coco_results_file_bbox_list):
|
61 |
+
gt_box_xyxy = [gt_box[0], gt_box[1], gt_box[0] + gt_box[2], gt_box[1] + gt_box[3]]
|
62 |
+
pred_box_xyxy = [pred_box[0], pred_box[1], pred_box[0] + pred_box[2], pred_box[1] + pred_box[3]]
|
63 |
+
iou_mat[i, j] = bb_intersection_over_union(gt_box_xyxy, pred_box_xyxy)
|
64 |
+
return iou_mat
|
65 |
+
|
66 |
+
def get_matching_boxes_iou(detectron_train_file_bbox_list, iou_mat, iou_thresh = 0.75):
|
67 |
+
matching_iou_boxes = []
|
68 |
+
iou_mat = iou_mat.tolist()
|
69 |
+
for i in range(0, len(detectron_train_file_bbox_list)):
|
70 |
+
if iou_mat[i]:
|
71 |
+
iou_row_max = max(iou_mat[i])
|
72 |
+
iou_row_max_pred_id = iou_mat[i].index(iou_row_max)
|
73 |
+
# print("iou_mat:", iou_mat)
|
74 |
+
# print((iou_row_max))
|
75 |
+
# print(iou_row_max_pred_id)
|
76 |
+
#print(iou_row_max)
|
77 |
+
if iou_row_max>iou_thresh:
|
78 |
+
matching_iou_boxes.append([i, iou_row_max_pred_id, iou_row_max])
|
79 |
+
# print(matching_iou_boxes)
|
80 |
+
#print("Number of matching IOU Ground truth and Predicted boxes: " , len(matching_iou_boxes))
|
81 |
+
return matching_iou_boxes
|
82 |
+
|
83 |
+
def compute_mse_matching_boxes(matching_iou_boxes, detectron_train_file_bbox_list, coco_results_file_bbox_list):
|
84 |
+
# Compute center-to-center error of each matched GT/predicted box pair (based on matching iou-boxes)
|
85 |
+
mse = []
|
86 |
+
iou_sum = 0
|
87 |
+
for matching_bbox in matching_iou_boxes:
|
88 |
+
# ground truth
|
89 |
+
x = detectron_train_file_bbox_list[matching_bbox[0]][0]
|
90 |
+
y = detectron_train_file_bbox_list[matching_bbox[0]][1]
|
91 |
+
w = detectron_train_file_bbox_list[matching_bbox[0]][2]
|
92 |
+
h = detectron_train_file_bbox_list[matching_bbox[0]][3]
|
93 |
+
center_x_gt = x + (w/2)
|
94 |
+
center_y_gt = y + (h/2)
|
95 |
+
|
96 |
+
# predicted
|
97 |
+
x = coco_results_file_bbox_list[matching_bbox[1]][0]
|
98 |
+
y = coco_results_file_bbox_list[matching_bbox[1]][1]
|
99 |
+
w = coco_results_file_bbox_list[matching_bbox[1]][2]
|
100 |
+
h = coco_results_file_bbox_list[matching_bbox[1]][3]
|
101 |
+
center_x_pred = x + (w / 2)
|
102 |
+
center_y_pred = y + (h / 2)
|
103 |
+
|
104 |
+
mse.append(((center_x_gt - center_x_pred)**2 + (center_y_gt - center_y_pred)** 2)**0.5)
|
105 |
+
iou_sum += matching_bbox[2]
|
106 |
+
|
107 |
+
# np.array(matching_iou_boxes)[2].mean()
|
108 |
+
if len(matching_iou_boxes)>0:
|
109 |
+
return np.array(mse).mean(), iou_sum/len(matching_iou_boxes), mse
|
110 |
+
else:
|
111 |
+
return np.array(mse).mean(), -1, mse
|
112 |
+
|
113 |
+
def nms_predicted_bounding_boxes(pred_bbox_list, pred_bbox_scores, iou_thresh_nms=0.95):
|
114 |
+
#NMS of predicted boxes
|
115 |
+
#print(len(pred_bbox_list))
|
116 |
+
final_tracking_bbox_list =[]
|
117 |
+
iou_mat = get_iou_mat_image(pred_bbox_list, pred_bbox_list)
|
118 |
+
|
119 |
+
matching_pred_boxes_iou = get_matching_boxes_iou(pred_bbox_list, iou_mat, iou_thresh = iou_thresh_nms)
|
120 |
+
while(len(matching_pred_boxes_iou)>0):
|
121 |
+
sorted_bbox_list = sorted(zip(pred_bbox_scores, pred_bbox_list))
|
122 |
+
iou_mat = get_iou_mat_image(pred_bbox_list, [pred_bbox_list[-1]])
|
123 |
+
#print(iou_mat)
|
124 |
+
pred_bbox_list_temp = []
|
125 |
+
pred_bbox_scores_temp = []
|
126 |
+
for index, iou in enumerate(iou_mat):
|
127 |
+
if iou[0]<iou_thresh_nms:
|
128 |
+
pred_bbox_list_temp.append(pred_bbox_list[index])
|
129 |
+
pred_bbox_scores_temp.append(pred_bbox_scores[index])
|
130 |
+
# matching_pred_boxes_iou = get_matching_boxes_iou(pred_bbox_list, iou_mat, iou_thresh = iou_thresh_nms)
|
131 |
+
# print(matching_pred_boxes_iou)
|
132 |
+
final_tracking_bbox_list.append(pred_bbox_list[-1]) #add highest scored bbox to final list
|
133 |
+
# matching_pred_boxes_index=[]
|
134 |
+
# for bbox in matching_pred_boxes_iou:
|
135 |
+
# matching_pred_boxes_index.append(bbox[0])
|
136 |
+
# print("hello",matching_pred_boxes_index)
|
137 |
+
|
138 |
+
# for index, bbox in enumerate(pred_bbox_list):
|
139 |
+
# if index not in matching_pred_boxes_index:
|
140 |
+
# pred_bbox_list_temp.append(pred_bbox_list[index])
|
141 |
+
# pred_bbox_scores_temp.append(pred_bbox_scores[index])
|
142 |
+
pred_bbox_list = pred_bbox_list_temp
|
143 |
+
pred_bbox_scores = pred_bbox_scores_temp
|
144 |
+
iou_mat = get_iou_mat_image(pred_bbox_list, pred_bbox_list)
|
145 |
+
matching_pred_boxes_iou = get_matching_boxes_iou(pred_bbox_list, iou_mat, iou_thresh = iou_thresh_nms)
|
146 |
+
|
147 |
+
|
148 |
+
#print(final_tracking_bbox_list + (pred_bbox_list))
|
149 |
+
# print(len(final_tracking_bbox_list))
|
150 |
+
# print(len(pred_bbox_list))
|
151 |
+
#print(len(final_tracking_bbox_list + (pred_bbox_list)))
|
152 |
+
return final_tracking_bbox_list + (pred_bbox_list)
|
153 |
+
|
154 |
+
|
155 |
+
def get_statistics(data_path, images_path, coco_eval_path, test_json_path, train_file_name="train.json", detectron_train_file_name = "boardetect_train_coco_format.json",iou_thresh=0.75, bacteria_tp_path="./bacteria-detections/", coco_file = "coco_instances_results.json",print_details=True, store_results_path = None):
|
156 |
+
# data_path = "./data_phase_nodes/"
|
157 |
+
# images_path = data_path + "images_cropped_resized/"
|
158 |
+
# coco_eval_path = "./coco_eval_phase_cropped/"
|
159 |
+
num_images = 0
|
160 |
+
tp_sum = 0
|
161 |
+
fp_sum = 0
|
162 |
+
fn_sum = 0
|
163 |
+
mse_image_counter =0
|
164 |
+
iou_image_counter =0
|
165 |
+
ratio_gt_p = 0
|
166 |
+
ratio_p_gt = 0
|
167 |
+
mse = 0
|
168 |
+
precision = 0
|
169 |
+
recall = 0
|
170 |
+
iou = 0
|
171 |
+
all_img_mse = []
|
172 |
+
for filename in sorted(os.listdir(images_path)):
|
173 |
+
# print(filename)
|
174 |
+
image_id = filename.split(".tif")[0]
|
175 |
+
#print(image_id)
|
176 |
+
train_file = json.load(open(data_path + train_file_name,'r'))
|
177 |
+
train_file_bbox_list = []
|
178 |
+
#print(train_file[int(image_id)]["file_name"])
|
179 |
+
if train_file[int(image_id)]["file_name"] == filename:
|
180 |
+
for i, annotation in enumerate(train_file[int(image_id)]["annotations"]):
|
181 |
+
train_file_bbox_list.append(train_file[int(image_id)]["annotations"][i]["bbox"])
|
182 |
+
# print("Number of Ground Truth boxes: ",len(train_file_bbox_list))
|
183 |
+
# print(train_file_bbox_list[0])
|
184 |
+
|
185 |
+
detectron_train_file = json.load(open(test_json_path + detectron_train_file_name,'r'))
|
186 |
+
detectron_train_file_bbox_list = []
|
187 |
+
|
188 |
+
|
189 |
+
try:
|
190 |
+
for i,annotation in enumerate(detectron_train_file["annotations"]):
|
191 |
+
if detectron_train_file["annotations"][i]["image_id"] == int(image_id):
|
192 |
+
detectron_train_file_bbox_list.append(detectron_train_file["annotations"][i]["bbox"])
|
193 |
+
#annotation_image_id = detectron_train_file["annotations"][i]["image_id"]
|
194 |
+
#print(detectron_train_file["annotations"][i]["bbox"])
|
195 |
+
# print(len(detectron_train_file_bbox_list))
|
196 |
+
# print(detectron_train_file_bbox_list[0])
|
197 |
+
num_ground_truth_boxes = len(detectron_train_file_bbox_list)
|
198 |
+
except:
|
199 |
+
num_ground_truth_boxes = 0
|
200 |
+
|
201 |
+
|
202 |
+
coco_results_file = json.load(open(coco_eval_path + coco_file,'r'))
|
203 |
+
coco_results_file_bbox_list = []
|
204 |
+
coco_results_file_bbox_scores = []
|
205 |
+
|
206 |
+
for i,annotation in enumerate(coco_results_file):
|
207 |
+
#print(coco_results_file[i])
|
208 |
+
#print(image_id)
|
209 |
+
if coco_results_file[i]["image_id"] == int(image_id):
|
210 |
+
#print(true)
|
211 |
+
coco_results_file_bbox_list.append(coco_results_file[i]["bbox"])
|
212 |
+
coco_results_file_bbox_scores.append(coco_results_file[i]["score"])
|
213 |
+
# print(coco_results_file[i]["bbox"])
|
214 |
+
# print(len(coco_results_file_bbox_list))
|
215 |
+
# print((coco_results_file_bbox_list))
|
216 |
+
#print(detectron_train_file["annotations"][i]["bbox"])
|
217 |
+
# print("Number of Predicted bounding boxes: ", len(coco_results_file_bbox_list))
|
218 |
+
# print(coco_results_file_bbox_list[0])
|
219 |
+
|
220 |
+
#coco_results_file_bbox_list = nms_predicted_bounding_boxes(coco_results_file_bbox_list, coco_results_file_bbox_scores ,iou_thresh_nms=0.001)
|
221 |
+
|
222 |
+
# Stat 1 - Ratio of boxes Predicted to Ground truth boxes
|
223 |
+
num_predicted_boxes = len(coco_results_file_bbox_list)
|
224 |
+
|
225 |
+
|
226 |
+
num_images = num_images + 1
|
227 |
+
if num_ground_truth_boxes>0:
|
228 |
+
|
229 |
+
#Ratio of GT to P per image
|
230 |
+
ratio_gt_p = ratio_gt_p + (num_ground_truth_boxes/ max(num_predicted_boxes,1))
|
231 |
+
ratio_p_gt = ratio_p_gt + (num_predicted_boxes / max(num_ground_truth_boxes,1))
|
232 |
+
|
233 |
+
|
234 |
+
# Stat 2 - MSE of matched box centers from the matching boxes
|
235 |
+
iou_mat = get_iou_mat_image(detectron_train_file_bbox_list, coco_results_file_bbox_list)
|
236 |
+
matching_boxes_iou = get_matching_boxes_iou(detectron_train_file_bbox_list, iou_mat, iou_thresh = iou_thresh)
|
237 |
+
mse_image, iou_image, mse_list = compute_mse_matching_boxes(matching_boxes_iou, detectron_train_file_bbox_list, coco_results_file_bbox_list)
|
238 |
+
if mse_image>=0: #if no predicted boxes or no ground truth boxes then iou is nan
|
239 |
+
mse = mse + mse_image
|
240 |
+
mse_image_counter += 1
|
241 |
+
if iou_image>=0:#if no predicted boxes or no ground truth boxes then mse is nan
|
242 |
+
iou += iou_image
|
243 |
+
iou_image_counter += 1
|
244 |
+
|
245 |
+
true_positive = len(matching_boxes_iou)
|
246 |
+
# print("num image: ", num_images)
|
247 |
+
# print("Num pred: ", num_predicted_boxes)
|
248 |
+
# print("num matched: ", len(matching_boxes_iou))
|
249 |
+
os.makedirs(bacteria_tp_path, exist_ok=True)
|
250 |
+
coord_file = open(bacteria_tp_path + image_id +".txt",'w')
|
251 |
+
coord_file.write(image_id)
|
252 |
+
coord_file.write(" ")
|
253 |
+
coord_file.write(str(true_positive))
|
254 |
+
coord_file.write("\n")
|
255 |
+
coord_file.close()
|
256 |
+
|
257 |
+
# OLD METHOD OF PRECISION RECALL CALCULATION
|
258 |
+
# assumes 0 for no tp
|
259 |
+
# precision is average of averages
|
260 |
+
# if true_positive>0:
|
261 |
+
# false_positive = num_predicted_boxes - true_positive
|
262 |
+
# false_negative = num_ground_truth_boxes - true_positive
|
263 |
+
|
264 |
+
# precision += true_positive/(true_positive + false_positive)
|
265 |
+
|
266 |
+
# recall += true_positive/(true_positive + false_negative)
|
267 |
+
all_img_mse.extend(mse_list)
|
268 |
+
|
269 |
+
# UPDATED METHOD
|
270 |
+
# precision is total tp / total (tp + fp) over all images, not an average of per-image precision
|
271 |
+
|
272 |
+
false_positive = abs(num_predicted_boxes - true_positive)
|
273 |
+
false_negative = abs(num_ground_truth_boxes - true_positive)
|
274 |
+
|
275 |
+
tp_sum = tp_sum + true_positive
|
276 |
+
fp_sum = fp_sum + false_positive
|
277 |
+
fn_sum = fn_sum + false_negative
|
278 |
+
|
279 |
+
if num_ground_truth_boxes==0:
|
280 |
+
#print("no gt")
|
281 |
+
fp_sum = fp_sum + num_predicted_boxes
|
282 |
+
|
283 |
+
# Stat 1 - Ratio of boxes Predicted to Ground truth boxes
|
284 |
+
avg_ratio_gt_p = ratio_gt_p/num_images
|
285 |
+
avg_ratio_p_gt = ratio_p_gt/num_images
|
286 |
+
|
287 |
+
|
288 |
+
# Stat 2 - MSE of matched box centers from the matching boxes
|
289 |
+
try:
|
290 |
+
avg_mse = mse / mse_image_counter
|
291 |
+
except:
|
292 |
+
avg_mse = 0
|
293 |
+
|
294 |
+
|
295 |
+
try:
|
296 |
+
avg_iou = iou / iou_image_counter
|
297 |
+
except:
|
298 |
+
avg_iou = 0
|
299 |
+
try:
|
300 |
+
avg_prec = tp_sum / (tp_sum + fp_sum)
|
301 |
+
avg_recall = tp_sum / (tp_sum + fn_sum)
|
302 |
+
except:
|
303 |
+
avg_prec = 0
|
304 |
+
avg_recall = 0
|
305 |
+
|
306 |
+
if store_results_path:
|
307 |
+
result_file = open(store_results_path,'a+')
|
308 |
+
result_file.write(str(tp_sum))
|
309 |
+
result_file.write(",")
|
310 |
+
result_file.write(str(fp_sum))
|
311 |
+
result_file.write(",")
|
312 |
+
result_file.write(str(fn_sum))
|
313 |
+
result_file.write("\n")
|
314 |
+
result_file.close()
|
315 |
+
|
316 |
+
if print_details:
|
317 |
+
# ap = precision/num_images
|
318 |
+
# ar = recall/num_images
|
319 |
+
print("Average Ground Truth to Predicted Ratio: ", avg_ratio_gt_p)
|
320 |
+
print("Average Predicted to Ground Truth Ratio: ", avg_ratio_p_gt)
|
321 |
+
print("Average Mean Squared Error of fiber intersections: ", avg_mse)
|
322 |
+
print("Average IoU of TP boxes: ", avg_iou)
|
323 |
+
print("TP sum: ", tp_sum)
|
324 |
+
print("FP sum: ", fp_sum)
|
325 |
+
print("FN sum: ", fn_sum)
|
326 |
+
|
327 |
+
print("Average Precision: ", avg_prec)
|
328 |
+
print("Average Recall: ", avg_recall)
|
329 |
+
|
330 |
+
pyplot.figure(figsize=(10, 3))
|
331 |
+
sns.distplot(all_img_mse)
|
332 |
+
pyplot.xlabel("Nodal Errors (in px sq)")
|
333 |
+
# plt.xlabel("")
|
334 |
+
pyplot.grid("on", alpha = 0.3)
|
335 |
+
pyplot.show()
|
336 |
+
return avg_prec, avg_recall
|
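An example call in the same style as the per-subpopulation blocks below; the file-name patterns follow the f-strings used there and everything else is illustrative.
# pr, rc = get_statistics(data_path, images_path, coco_eval_path, test_json_path,
#                         train_file_name="test_Motility-high.json",
#                         detectron_train_file_name="boardetect_test_coco_format_Motility-high.json",
#                         coco_file="coco_instances_results_Motility-high.json",
#                         iou_thresh=0.1, print_details=False)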
337 |
+
|
338 |
+
video_map_path = args.video_map_path #"/alldata/medha/CleanCodeData/Data/videomap.txt"
|
339 |
+
video_map = open(video_map_path,'r',)
|
340 |
+
header = [x.strip() for x in (video_map.readline().strip()).split(",")]
|
341 |
+
video_num_id = header.index("video_num")
|
342 |
+
strain_id = header.index("strain")
|
343 |
+
strain_map = {}
|
344 |
+
for line in video_map.readlines():
|
345 |
+
line_details = [x.strip() for x in line.split(",")]
|
346 |
+
video_num = line_details[video_num_id]
|
347 |
+
strain = line_details[strain_id]
|
348 |
+
strain_map[video_num] = strain
|
349 |
+
strain_map[""] = "all"
|
350 |
+
|
351 |
+
#53 54 58 59 60 64 65 69 70 71 75 76 80 81 82 collagen
|
352 |
+
#85 24 30 35 83 84 agar0.2
|
353 |
+
#updated gar 19 22 29 30 83 84
|
354 |
+
#53 60 64 69 75 82
|
355 |
+
video_num = args.video
|
356 |
+
src_path = args.data_path #"/alldata/medha/CleanCodeData/DataFeatures/exp_collagen_train_veryhard/"
|
357 |
+
print(src_path)
|
358 |
+
video_path = f"data_video{video_num}_feature_optical_flow_median_back_2pyr_18win_background_img/"
|
359 |
+
data_path = os.path.join(src_path,video_path, "test/")
|
360 |
+
print(data_path)
|
361 |
+
images_path = data_path + "images/"
|
362 |
+
coco_eval_path = data_path
|
363 |
+
test_json_path = data_path
|
364 |
+
precision = {}
|
365 |
+
recall = {}
|
366 |
+
store_file_path_main = src_path + "test_set_results"
|
367 |
+
|
368 |
+
# ### Step 1: Detection Individual Models
|
369 |
+
|
370 |
+
# In[15]:
|
371 |
+
|
372 |
+
|
373 |
+
try:
|
374 |
+
difficulty_level = "Motility-low"
|
375 |
+
coco_file=f"coco_instances_results_{difficulty_level}.json"
|
376 |
+
detectron_train_file_name = f"boardetect_test_coco_format_{difficulty_level}.json"
|
377 |
+
train_file_name=f"test_{difficulty_level}.json"
|
378 |
+
|
379 |
+
coco_file = "coco_instances_results_wiggle_diffusivity.json"
|
380 |
+
train_file_name="test_Motility-wiggle.json"
|
381 |
+
detectron_train_file_name = "boardetect_test_coco_format_Motility-wiggle.json"
|
382 |
+
|
383 |
+
|
384 |
+
store_file_path = store_file_path_main + f"_{difficulty_level}.txt" if store_file_path_main else None
|
385 |
+
if store_file_path:
|
386 |
+
result_file = open(store_file_path,'a+')
|
387 |
+
result_file.write(str(video_num))
|
388 |
+
result_file.write(",")
|
389 |
+
result_file.close()
|
390 |
+
#bacteria_tp_path = ./data_video22_feature_optical_flow_median_back_2pyr_18win_background_img/test/1/predicted/"
|
391 |
+
# visualize_gt_pred(data_path, images_path, coco_eval_path, test_json_path,
|
392 |
+
# train_file_name=train_file_name,detectron_train_file_name=detectron_train_file_name,
|
393 |
+
# coco_file=coco_file)
|
394 |
+
|
395 |
+
pr, rc = get_statistics(data_path, images_path, coco_eval_path,test_json_path,
|
396 |
+
train_file_name=train_file_name,detectron_train_file_name = detectron_train_file_name,
|
397 |
+
iou_thresh=0.1, coco_file=coco_file, print_details=False, store_results_path=store_file_path)
|
398 |
+
print("Precision: ", pr)
|
399 |
+
print("Recall: ", rc)
|
400 |
+
precision[ "Motility-low"] = pr
|
401 |
+
recall[ "Motility-low"] = rc
|
402 |
+
except:
|
403 |
+
precision[ "Motility-low"] = 0.0
|
404 |
+
recall[ "Motility-low"] = 0.0
|
405 |
+
with open(store_file_path, "r+") as f:
|
406 |
+
current_position = previous_position = f.tell()
|
407 |
+
while f.readline():
|
408 |
+
previous_position = current_position
|
409 |
+
current_position = f.tell()
|
410 |
+
f.truncate(previous_position)
|
411 |
+
f.close()
|
412 |
+
|
413 |
+
|
414 |
+
# In[16]:
|
415 |
+
|
416 |
+
|
417 |
+
try:
|
418 |
+
difficulty_level = "Motility-wiggle"
|
419 |
+
coco_file=f"coco_instances_results_{difficulty_level}.json"
|
420 |
+
detectron_train_file_name = f"boardetect_test_coco_format_{difficulty_level}.json"
|
421 |
+
train_file_name=f"test_{difficulty_level}.json"
|
422 |
+
store_file_path = store_file_path_main + f"_{difficulty_level}.txt" if store_file_path_main else None
|
423 |
+
if store_file_path:
|
424 |
+
result_file = open(store_file_path,'a+')
|
425 |
+
result_file.write(str(video_num))
|
426 |
+
result_file.write(",")
|
427 |
+
result_file.close()
|
428 |
+
#bacteria_tp_path = ./data_video22_feature_optical_flow_median_back_2pyr_18win_background_img/test/1/predicted/"
|
429 |
+
# visualize_gt_pred(data_path, images_path, coco_eval_path, test_json_path,
|
430 |
+
# train_file_name=train_file_name,detectron_train_file_name=detectron_train_file_name,
|
431 |
+
# coco_file=coco_file)
|
432 |
+
|
433 |
+
pr, rc = get_statistics(data_path, images_path, coco_eval_path,test_json_path,
|
434 |
+
train_file_name=train_file_name,detectron_train_file_name = detectron_train_file_name,
|
435 |
+
iou_thresh=0.1, coco_file=coco_file, print_details=False, store_results_path=store_file_path)
|
436 |
+
print("Precision: ", pr)
|
437 |
+
print("Recall: ", rc)
|
438 |
+
precision["Motility-wiggle"] = pr
|
439 |
+
recall["Motility-wiggle"] = rc
|
440 |
+
except:
|
441 |
+
precision["Motility-wiggle"] = 0.0
|
442 |
+
recall["Motility-wiggle"] = 0.0
|
443 |
+
with open(store_file_path, "r+") as f:
|
444 |
+
current_position = previous_position = f.tell()
|
445 |
+
while f.readline():
|
446 |
+
previous_position = current_position
|
447 |
+
current_position = f.tell()
|
448 |
+
f.truncate(previous_position)
|
449 |
+
f.close()
|
450 |
+
|
451 |
+
|
452 |
+
# In[17]:
|
453 |
+
|
454 |
+
|
455 |
+
try:
|
456 |
+
difficulty_level = "Motility-mid"
|
457 |
+
coco_file=f"coco_instances_results_{difficulty_level}.json"
|
458 |
+
detectron_train_file_name = f"boardetect_test_coco_format_{difficulty_level}.json"
|
459 |
+
train_file_name=f"test_{difficulty_level}.json"
|
460 |
+
store_file_path = store_file_path_main + f"_{difficulty_level}.txt" if store_file_path_main else None
|
461 |
+
if store_file_path:
|
462 |
+
result_file = open(store_file_path,'a+')
|
463 |
+
result_file.write(str(video_num))
|
464 |
+
result_file.write(",")
|
465 |
+
result_file.close()
|
466 |
+
#bacteria_tp_path = ./data_video22_feature_optical_flow_median_back_2pyr_18win_background_img/test/1/predicted/"
|
467 |
+
# visualize_gt_pred(data_path, images_path, coco_eval_path, test_json_path,
|
468 |
+
# train_file_name=train_file_name,detectron_train_file_name=detectron_train_file_name,
|
469 |
+
# coco_file=coco_file)
|
470 |
+
|
471 |
+
pr, rc = get_statistics(data_path, images_path, coco_eval_path,test_json_path,
|
472 |
+
train_file_name=train_file_name,detectron_train_file_name = detectron_train_file_name,
|
473 |
+
iou_thresh=0.1, coco_file=coco_file, print_details=False, store_results_path=store_file_path)
|
474 |
+
print("Precision: ", pr)
|
475 |
+
print("Recall: ", rc)
|
476 |
+
precision["Motility-mid"] = pr
|
477 |
+
recall["Motility-mid"] = rc
|
478 |
+
except:
|
479 |
+
precision["Motility-mid"] = 0.0
|
480 |
+
recall["Motility-mid"] = 0.0
|
481 |
+
with open(store_file_path, "r+") as f:
|
482 |
+
current_position = previous_position = f.tell()
|
483 |
+
while f.readline():
|
484 |
+
previous_position = current_position
|
485 |
+
current_position = f.tell()
|
486 |
+
f.truncate(previous_position)
|
487 |
+
f.close()
|
488 |
+
|
489 |
+
|
490 |
+
try:
|
491 |
+
difficulty_level = "Motility-high"
|
492 |
+
coco_file=f"coco_instances_results_{difficulty_level}.json"
|
493 |
+
detectron_train_file_name = f"boardetect_test_coco_format_{difficulty_level}.json"
|
494 |
+
train_file_name=f"test_{difficulty_level}.json"
|
495 |
+
store_file_path = store_file_path_main + f"_{difficulty_level}.txt" if store_file_path_main else None
|
496 |
+
if store_file_path:
|
497 |
+
result_file = open(store_file_path,'a+')
|
498 |
+
result_file.write(str(video_num))
|
499 |
+
result_file.write(",")
|
500 |
+
result_file.close()
|
501 |
+
#bacteria_tp_path = ./data_video22_feature_optical_flow_median_back_2pyr_18win_background_img/test/1/predicted/"
|
502 |
+
# visualize_gt_pred(data_path, images_path, coco_eval_path, test_json_path,
|
503 |
+
# train_file_name=train_file_name,detectron_train_file_name=detectron_train_file_name,
|
504 |
+
# coco_file=coco_file)
|
505 |
+
|
506 |
+
pr, rc = get_statistics(data_path, images_path, coco_eval_path,test_json_path,
|
507 |
+
train_file_name=train_file_name,detectron_train_file_name = detectron_train_file_name,
|
508 |
+
iou_thresh=0.1, coco_file=coco_file, print_details=False, store_results_path=store_file_path)
|
509 |
+
print("Precision: ", pr)
|
510 |
+
print("Recall: ", rc)
|
511 |
+
precision["Motility-high"] = pr
|
512 |
+
recall["Motility-high"] = rc
|
513 |
+
except:
|
514 |
+
precision["Motility-high"] = 0.0
|
515 |
+
recall["Motility-high"] = 0.0
|
516 |
+
with open(store_file_path, "r+") as f:
|
517 |
+
current_position = previous_position = f.tell()
|
518 |
+
while f.readline():
|
519 |
+
previous_position = current_position
|
520 |
+
current_position = f.tell()
|
521 |
+
f.truncate(previous_position)
|
522 |
+
f.close()
|
523 |
+
|
524 |
+
|
525 |
+
|
526 |
+
|
527 |
+
# ### Step 2: Detection Combination Model
|
528 |
+
|
529 |
+
# In[18]:
|
530 |
+
|
531 |
+
coco_file = "coco_instances_results_combined.json"
|
532 |
+
|
533 |
+
|
534 |
+
train_file_name=f"test_{args.all_labels_file}.json"
|
535 |
+
detectron_train_file_name = f"boardetect_test_coco_format_{args.all_labels_file}.json"
|
536 |
+
store_file_path = store_file_path_main + f"_combined.txt" if store_file_path_main else None
|
537 |
+
if store_file_path:
|
538 |
+
result_file = open(store_file_path,'a+')
|
539 |
+
result_file.write(str(video_num))
|
540 |
+
result_file.write(",")
|
541 |
+
result_file.close()
|
542 |
+
# visualize_gt_pred(data_path, images_path, coco_eval_path, test_json_path,
|
543 |
+
# train_file_name=train_file_name,
|
544 |
+
# detectron_train_file_name = detectron_train_file_name, coco_file=coco_file)
|
545 |
+
pr, rc = get_statistics(data_path, images_path, coco_eval_path, test_json_path,
|
546 |
+
train_file_name=train_file_name, detectron_train_file_name = detectron_train_file_name,
|
547 |
+
iou_thresh=0.1,coco_file=coco_file,print_details=False, store_results_path=store_file_path)
|
548 |
+
print("Precision: ", pr)
|
549 |
+
print("Recall: ", rc)
|
550 |
+
precision["Combination Model Detection"] = pr
|
551 |
+
recall["Combination Model Detection"] = rc
|
552 |
+
|
553 |
+
|
554 |
+
|
555 |
+
|
556 |
+
# ### Step 3: Filter on Predicted Bacteria Bounding Box Size
|
557 |
+
|
558 |
+
# In[19]:
|
559 |
+
|
560 |
+
|
561 |
+
coco_file = "coco_instances_results_combined_filter_box_size.json"
|
562 |
+
train_file_name="test_All.json"
|
563 |
+
detectron_train_file_name = "boardetect_test_coco_format_All.json"
|
564 |
+
|
565 |
+
train_file_name=f"test_{args.all_labels_file}.json"
|
566 |
+
detectron_train_file_name = f"boardetect_test_coco_format_{args.all_labels_file}.json"
|
567 |
+
store_file_path = store_file_path_main + f"_filter_bbox.txt" if store_file_path_main else None
|
568 |
+
if store_file_path:
|
569 |
+
result_file = open(store_file_path,'a+')
|
570 |
+
result_file.write(str(video_num))
|
571 |
+
result_file.write(",")
|
572 |
+
result_file.close()
|
573 |
+
# visualize_gt_pred(data_path, images_path, coco_eval_path, test_json_path,
|
574 |
+
# train_file_name=train_file_name,
|
575 |
+
# detectron_train_file_name = detectron_train_file_name, coco_file=coco_file)
|
576 |
+
pr, rc = get_statistics(data_path, images_path, coco_eval_path, test_json_path,
|
577 |
+
train_file_name=train_file_name, detectron_train_file_name = detectron_train_file_name,
|
578 |
+
iou_thresh=0.1,coco_file=coco_file,print_details=False, store_results_path=store_file_path)
|
579 |
+
print("Precision: ", pr)
|
580 |
+
print("Recall: ", rc)
|
581 |
+
precision["Filter Box Size"] = pr
|
582 |
+
recall["Filter Box Size"] = rc
|
583 |
+
|
584 |
+
|
585 |
+
# ### Step 4: Filter on Predicted Bacteria Confidence Score
|
586 |
+
|
587 |
+
# In[20]:
|
588 |
+
|
589 |
+
|
590 |
+
coco_file = "coco_instances_results_combined_filter_conf_score.json"
|
591 |
+
train_file_name="test_All.json"
|
592 |
+
detectron_train_file_name = "boardetect_test_coco_format_All.json"
|
593 |
+
|
594 |
+
train_file_name=f"test_{args.all_labels_file}.json"
|
595 |
+
detectron_train_file_name = f"boardetect_test_coco_format_{args.all_labels_file}.json"
|
596 |
+
store_file_path = store_file_path_main + f"_filter_conf_score.txt" if store_file_path_main else None
|
597 |
+
if store_file_path:
|
598 |
+
result_file = open(store_file_path,'a+')
|
599 |
+
result_file.write(str(video_num))
|
600 |
+
result_file.write(",")
|
601 |
+
result_file.close()
|
602 |
+
# visualize_gt_pred(data_path, images_path, coco_eval_path, test_json_path,
|
603 |
+
# train_file_name=train_file_name,
|
604 |
+
# detectron_train_file_name = detectron_train_file_name, coco_file=coco_file)
|
605 |
+
pr, rc = get_statistics(data_path, images_path, coco_eval_path, test_json_path,
|
606 |
+
train_file_name=train_file_name, detectron_train_file_name = detectron_train_file_name,
|
607 |
+
iou_thresh=0.1,coco_file=coco_file,print_details=False, store_results_path=store_file_path)
|
608 |
+
print("Precision: ", pr)
|
609 |
+
print("Recall: ", rc)
|
610 |
+
precision["Filter Conf Score"] = pr
|
611 |
+
recall["Filter Conf Score"] = rc
|
612 |
+
|
613 |
+
|
614 |
+
# ### Step 4: Filter using NMS
|
615 |
+
|
616 |
+
# In[21]:
|
617 |
+
|
618 |
+
|
619 |
+
coco_file = "coco_instances_results_combined_filter_nms.json"
|
620 |
+
train_file_name="test_All.json"
|
621 |
+
detectron_train_file_name = "boardetect_test_coco_format_All.json"
|
622 |
+
|
623 |
+
train_file_name=f"test_{args.all_labels_file}.json"
|
624 |
+
detectron_train_file_name = f"boardetect_test_coco_format_{args.all_labels_file}.json"
|
625 |
+
|
626 |
+
store_file_path = store_file_path_main + f"_filter_nms.txt" if store_file_path_main else None
|
627 |
+
if store_file_path:
|
628 |
+
result_file = open(store_file_path,'a+')
|
629 |
+
result_file.write(str(video_num))
|
630 |
+
result_file.write(",")
|
631 |
+
result_file.close()
|
632 |
+
# visualize_gt_pred(data_path, images_path, coco_eval_path, test_json_path,
|
633 |
+
# train_file_name=train_file_name,
|
634 |
+
# detectron_train_file_name = detectron_train_file_name, coco_file=coco_file)
|
635 |
+
pr, rc = get_statistics(data_path, images_path, coco_eval_path, test_json_path,
|
636 |
+
train_file_name=train_file_name, detectron_train_file_name = detectron_train_file_name,
|
637 |
+
iou_thresh=0.1,coco_file=coco_file,print_details=False, store_results_path=store_file_path)
|
638 |
+
print("Precision: ", pr)
|
639 |
+
print("Recall: ", rc)
|
640 |
+
precision["Filter NMS"] = pr
|
641 |
+
recall["Filter NMS"] = rc
|
642 |
+
|
643 |
+
|
644 |
+
|
645 |
+
# ## Step 5: Tracking
|
646 |
+
|
647 |
+
# In[22]:
|
648 |
+
|
649 |
+
|
650 |
+
coco_file = f"./video{video_num}_tracking_predictions.json"
|
651 |
+
train_file_name="test_All.json"
|
652 |
+
detectron_train_file_name = "boardetect_test_coco_format_All.json"
|
653 |
+
|
654 |
+
train_file_name=f"test_{args.all_labels_file}.json"
|
655 |
+
detectron_train_file_name = f"boardetect_test_coco_format_{args.all_labels_file}.json"
|
656 |
+
store_file_path = store_file_path_main + f"_tracking.txt" if store_file_path_main else None
|
657 |
+
if store_file_path:
|
658 |
+
result_file = open(store_file_path,'a+')
|
659 |
+
result_file.write(str(video_num))
|
660 |
+
result_file.write(",")
|
661 |
+
result_file.close()
|
662 |
+
# visualize_gt_pred(data_path, images_path, coco_eval_path, test_json_path,
|
663 |
+
# train_file_name=train_file_name,
|
664 |
+
# detectron_train_file_name = detectron_train_file_name, coco_file=coco_file)
|
665 |
+
pr, rc = get_statistics(data_path, images_path, coco_eval_path, test_json_path,
|
666 |
+
train_file_name=train_file_name, detectron_train_file_name = detectron_train_file_name,
|
667 |
+
iou_thresh=0.1,coco_file=coco_file,print_details=False, store_results_path=store_file_path)
|
668 |
+
print("Precision: ", pr)
|
669 |
+
print("Recall: ", rc)
|
670 |
+
precision["Tracking"] = pr
|
671 |
+
recall["Tracking"] = rc
|
672 |
+
|
673 |
+
|
674 |
+
|
675 |
+
|
676 |
+
# ### Step 6: Filter on Track length
|
677 |
+
|
678 |
+
# In[23]:
|
679 |
+
|
680 |
+
coco_file = "coco_instances_results_final.json"
|
681 |
+
train_file_name="test_All.json"
|
682 |
+
detectron_train_file_name = "boardetect_test_coco_format_All.json"
|
683 |
+
|
684 |
+
train_file_name=f"test_{args.all_labels_file}.json"
|
685 |
+
detectron_train_file_name = f"boardetect_test_coco_format_{args.all_labels_file}.json"
|
686 |
+
|
687 |
+
|
688 |
+
|
689 |
+
store_file_path = store_file_path_main + f"_filter_track_length.txt" if store_file_path_main else None
|
690 |
+
if store_file_path:
|
691 |
+
result_file = open(store_file_path,'a+')
|
692 |
+
result_file.write(str(video_num))
|
693 |
+
result_file.write(",")
|
694 |
+
result_file.close()
|
695 |
+
# visualize_gt_pred(data_path, images_path, coco_eval_path, test_json_path,
|
696 |
+
# train_file_name=train_file_name,
|
697 |
+
# detectron_train_file_name = detectron_train_file_name, coco_file=coco_file)
|
698 |
+
pr, rc = get_statistics(data_path, images_path, coco_eval_path, test_json_path,
|
699 |
+
train_file_name=train_file_name, detectron_train_file_name = detectron_train_file_name,
|
700 |
+
iou_thresh=0.1,coco_file=coco_file,print_details=False, store_results_path=store_file_path)
|
701 |
+
print("Precision: ", pr)
|
702 |
+
print("Recall: ", rc)
|
703 |
+
precision["Filter Track Length"] = pr
|
704 |
+
recall["Filter Track Length"] = rc
|
705 |
+
|
706 |
+
|
707 |
+
|
708 |
+
# #motility analysis
|
709 |
+
|
710 |
+
# coco_file = "coco_pred_motile.json"
|
711 |
+
# train_file_name="test_All.json"
|
712 |
+
# detectron_train_file_name = "coco_gt_motile.json"
|
713 |
+
# store_file_path = store_file_path_main + f"_motile.txt" if store_file_path_main else None
|
714 |
+
# if store_file_path:
|
715 |
+
# result_file = open(store_file_path,'a+')
|
716 |
+
# result_file.write(str(video_num))
|
717 |
+
# result_file.write(",")
|
718 |
+
# result_file.close()
|
719 |
+
# # visualize_gt_pred(data_path, images_path, coco_eval_path, test_json_path,
|
720 |
+
# # train_file_name=train_file_name,
|
721 |
+
# # detectron_train_file_name = detectron_train_file_name, coco_file=coco_file)
|
722 |
+
# pr, rc = get_statistics(data_path, images_path, coco_eval_path, test_json_path,
|
723 |
+
# train_file_name=train_file_name, detectron_train_file_name = detectron_train_file_name,
|
724 |
+
# iou_thresh=0.1,coco_file=coco_file,print_details=False, store_results_path=store_file_path)
|
725 |
+
# print("Precision: ", pr)
|
726 |
+
# print("Recall: ", rc)
|
727 |
+
|
728 |
+
# coco_file = "coco_pred_non_motile.json"
|
729 |
+
# train_file_name="test_All.json"
|
730 |
+
# detectron_train_file_name = "coco_gt_non_motile.json"
|
731 |
+
# store_file_path = store_file_path_main + f"_non_motile.txt" if store_file_path_main else None
|
732 |
+
# if store_file_path:
|
733 |
+
# result_file = open(store_file_path,'a+')
|
734 |
+
# result_file.write(str(video_num))
|
735 |
+
# result_file.write(",")
|
736 |
+
# result_file.close()
|
737 |
+
# # visualize_gt_pred(data_path, images_path, coco_eval_path, test_json_path,
|
738 |
+
# # train_file_name=train_file_name,
|
739 |
+
# # detectron_train_file_name = detectron_train_file_name, coco_file=coco_file)
|
740 |
+
# pr, rc = get_statistics(data_path, images_path, coco_eval_path, test_json_path,
|
741 |
+
# train_file_name=train_file_name, detectron_train_file_name = detectron_train_file_name,
|
742 |
+
# iou_thresh=0.1,coco_file=coco_file,print_details=False, store_results_path=store_file_path)
|
743 |
+
# print("Precision: ", pr)
|
744 |
+
# print("Recall: ", rc)
|
745 |
+
|
746 |
+
# In[24]:
|
747 |
+
|
748 |
+
|
749 |
+
precision
|
750 |
+
|
751 |
+
|
752 |
+
# In[25]:
|
753 |
+
|
754 |
+
|
755 |
+
recall
|
MEMTrack/src/evaluation_step_wise_trackmate.py
ADDED
@@ -0,0 +1,401 @@
1 |
+
import random
|
2 |
+
import os
|
3 |
+
import cv2
|
4 |
+
import json
|
5 |
+
import matplotlib
|
6 |
+
import argparse
|
7 |
+
from matplotlib import image, patches
|
8 |
+
import matplotlib.pyplot as pyplot
|
9 |
+
import seaborn as sns
|
10 |
+
import numpy as np
|
11 |
+
import seaborn as sns
|
12 |
+
import pandas as pd
|
13 |
+
import matplotlib.pyplot as plt
|
14 |
+
|
15 |
+
ap = argparse.ArgumentParser(description='Inference')
|
16 |
+
ap.add_argument('--video_map_path', type=str, metavar='PATH')
|
17 |
+
ap.add_argument('--data_path', type=str, metavar='PATH')
|
18 |
+
ap.add_argument('--video', type=str, metavar='PATH')
|
19 |
+
ap.add_argument('--filename',type=str, metavar='PATH')
|
20 |
+
args = ap.parse_args()
|
21 |
+
|
22 |
+
def bb_intersection_over_union(boxA, boxB):
|
23 |
+
# determine the (x, y)-coordinates of the intersection rectangle
|
24 |
+
xA = max(boxA[0], boxB[0])
|
25 |
+
yA = max(boxA[1], boxB[1])
|
26 |
+
xB = min(boxA[2], boxB[2])
|
27 |
+
yB = min(boxA[3], boxB[3])
|
28 |
+
|
29 |
+
# compute the area of intersection rectangle
|
30 |
+
interArea = abs(max((xB - xA, 0)) * max((yB - yA), 0))
|
31 |
+
if interArea == 0:
|
32 |
+
return 0
|
33 |
+
|
34 |
+
# compute the area of both the prediction and ground-truth
|
35 |
+
# rectangles
|
36 |
+
boxAArea = abs((boxA[2] - boxA[0]) * (boxA[3] - boxA[1]))
|
37 |
+
boxBArea = abs((boxB[2] - boxB[0]) * (boxB[3] - boxB[1]))
|
38 |
+
|
39 |
+
# compute the intersection over union by taking the intersection
|
40 |
+
# area and dividing it by the sum of prediction + ground-truth
|
41 |
+
# areas - the intersection area
|
42 |
+
iou_box = interArea / float(boxAArea + boxBArea - interArea)
|
43 |
+
|
44 |
+
# return the intersection over union value
|
45 |
+
return iou_box
|
46 |
+
|
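For reference, a quick sanity check of the IoU helper above, with purely illustrative coordinates (two 31x31 boxes offset by 10 px in each direction):

    box_a = [100, 100, 131, 131]   # [x1, y1, x2, y2]
    box_b = [110, 110, 141, 141]
    print(bb_intersection_over_union(box_a, box_b))   # 441 / 1481, roughly 0.30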
47 |
+
def get_iou_mat_image(detectron_train_file_bbox_list, coco_results_file_bbox_list ):
|
48 |
+
# iou_mat = []
|
49 |
+
# for gt_box in detectron_train_file_bbox_list:
|
50 |
+
# iou_row = []
|
51 |
+
# for pred_box in coco_results_file_bbox_list:
|
52 |
+
# gt_box_xyxy = [gt_box[0], gt_box[1], gt_box[0] + gt_box[2], gt_box[1] + gt_box[3]]
|
53 |
+
# pred_box_xyxy = [pred_box[0], pred_box[1], pred_box[0] + pred_box[2], pred_box[1] + pred_box[3]]
|
54 |
+
# # print(pred_box_xyxy)
|
55 |
+
# iou_boxes = bb_intersection_over_union(gt_box_xyxy, pred_box_xyxy)
|
56 |
+
# iou_row.append(iou_boxes)
|
57 |
+
# iou_mat.append(iou_row)
|
58 |
+
iou_mat = np.zeros((len(detectron_train_file_bbox_list), len(coco_results_file_bbox_list)))
|
59 |
+
for i, gt_box in enumerate(detectron_train_file_bbox_list):
|
60 |
+
for j, pred_box in enumerate(coco_results_file_bbox_list):
|
61 |
+
gt_box_xyxy = [gt_box[0], gt_box[1], gt_box[0] + gt_box[2], gt_box[1] + gt_box[3]]
|
62 |
+
pred_box_xyxy = [pred_box[0], pred_box[1], pred_box[0] + pred_box[2], pred_box[1] + pred_box[3]]
|
63 |
+
iou_mat[i, j] = bb_intersection_over_union(gt_box_xyxy, pred_box_xyxy)
|
64 |
+
return iou_mat
|
65 |
+
|
66 |
+
def get_matching_boxes_iou(detectron_train_file_bbox_list, iou_mat, iou_thresh = 0.75):
|
67 |
+
matching_iou_boxes = []
|
68 |
+
iou_mat = iou_mat.tolist()
|
69 |
+
for i in range(0, len(detectron_train_file_bbox_list)):
|
70 |
+
if iou_mat[i]:
|
71 |
+
iou_row_max = max(iou_mat[i])
|
72 |
+
iou_row_max_pred_id = iou_mat[i].index(iou_row_max)
|
73 |
+
# print("iou_mat:", iou_mat)
|
74 |
+
# print((iou_row_max))
|
75 |
+
# print(iou_row_max_pred_id)
|
76 |
+
#print(iou_row_max)
|
77 |
+
if iou_row_max>iou_thresh:
|
78 |
+
matching_iou_boxes.append([i, iou_row_max_pred_id, iou_row_max])
|
79 |
+
# print(matching_iou_boxes)
|
80 |
+
#print("Number of matching IOU Ground truth and Predicted boxes: " , len(matching_iou_boxes))
|
81 |
+
return matching_iou_boxes
|
82 |
+
|
83 |
+
def compute_mse_matching_boxes(matching_iou_boxes, detectron_train_file_bbox_list, coco_results_file_bbox_list):
|
84 |
+
# Compute MSE of fiber intersection based on matching iou-boxes
|
85 |
+
mse = []
|
86 |
+
iou_sum = 0
|
87 |
+
for matching_bbox in matching_iou_boxes:
|
88 |
+
# ground truth
|
89 |
+
x = detectron_train_file_bbox_list[matching_bbox[0]][0]
|
90 |
+
y = detectron_train_file_bbox_list[matching_bbox[0]][1]
|
91 |
+
w = detectron_train_file_bbox_list[matching_bbox[0]][2]
|
92 |
+
h = detectron_train_file_bbox_list[matching_bbox[0]][3]
|
93 |
+
center_x_gt = x + (w/2)
|
94 |
+
center_y_gt = y + (h/2)
|
95 |
+
|
96 |
+
# predicted
|
97 |
+
x = coco_results_file_bbox_list[matching_bbox[1]][0]
|
98 |
+
y = coco_results_file_bbox_list[matching_bbox[1]][1]
|
99 |
+
w = coco_results_file_bbox_list[matching_bbox[1]][2]
|
100 |
+
h = coco_results_file_bbox_list[matching_bbox[1]][3]
|
101 |
+
center_x_pred = x + (w / 2)
|
102 |
+
center_y_pred = y + (h / 2)
|
103 |
+
|
104 |
+
mse.append(((center_x_gt - center_x_pred)**2 + (center_y_gt - center_y_pred)** 2)**0.5)
|
105 |
+
iou_sum += matching_bbox[2]
|
106 |
+
|
107 |
+
# np.array(matching_iou_boxes)[2].mean()
|
108 |
+
if len(matching_iou_boxes)>0:
|
109 |
+
return np.array(mse).mean(), iou_sum/len(matching_iou_boxes), mse
|
110 |
+
else:
|
111 |
+
return np.array(mse).mean(), -1, mse
|
112 |
+
|
113 |
+
def nms_predicted_bounding_boxes(pred_bbox_list, pred_bbox_scores, iou_thresh_nms=0.95):
|
114 |
+
#NMS of predicted boxes
|
115 |
+
#print(len(pred_bbox_list))
|
116 |
+
final_tracking_bbox_list =[]
|
117 |
+
iou_mat = get_iou_mat_image(pred_bbox_list, pred_bbox_list)
|
118 |
+
|
119 |
+
matching_pred_boxes_iou = get_matching_boxes_iou(pred_bbox_list, iou_mat, iou_thresh = iou_thresh_nms)
|
120 |
+
while(len(matching_pred_boxes_iou)>0):
|
121 |
+
sorted_bbox_list = sorted(zip(pred_bbox_scores, pred_bbox_list))
|
122 |
+
iou_mat = get_iou_mat_image(pred_bbox_list, [pred_bbox_list[-1]])
|
123 |
+
#print(iou_mat)
|
124 |
+
pred_bbox_list_temp = []
|
125 |
+
pred_bbox_scores_temp = []
|
126 |
+
for index, iou in enumerate(iou_mat):
|
127 |
+
if iou[0]<iou_thresh_nms:
|
128 |
+
pred_bbox_list_temp.append(pred_bbox_list[index])
|
129 |
+
pred_bbox_scores_temp.append(pred_bbox_scores[index])
|
130 |
+
# matching_pred_boxes_iou = get_matching_boxes_iou(pred_bbox_list, iou_mat, iou_thresh = iou_thresh_nms)
|
131 |
+
# print(matching_pred_boxes_iou)
|
132 |
+
final_tracking_bbox_list.append(pred_bbox_list[-1]) #add highest scored bbox to final list
|
133 |
+
# matching_pred_boxes_index=[]
|
134 |
+
# for bbox in matching_pred_boxes_iou:
|
135 |
+
# matching_pred_boxes_index.append(bbox[0])
|
136 |
+
# print("hello",matching_pred_boxes_index)
|
137 |
+
|
138 |
+
# for index, bbox in enumerate(pred_bbox_list):
|
139 |
+
# if index not in matching_pred_boxes_index:
|
140 |
+
# pred_bbox_list_temp.append(pred_bbox_list[index])
|
141 |
+
# pred_bbox_scores_temp.append(pred_bbox_scores[index])
|
142 |
+
pred_bbox_list = pred_bbox_list_temp
|
143 |
+
pred_bbox_scores = pred_bbox_scores_temp
|
144 |
+
iou_mat = get_iou_mat_image(pred_bbox_list, pred_bbox_list)
|
145 |
+
matching_pred_boxes_iou = get_matching_boxes_iou(pred_bbox_list, iou_mat, iou_thresh = iou_thresh_nms)
|
146 |
+
|
147 |
+
|
148 |
+
#print(final_tracking_bbox_list + (pred_bbox_list))
|
149 |
+
# print(len(final_tracking_bbox_list))
|
150 |
+
# print(len(pred_bbox_list))
|
151 |
+
#print(len(final_tracking_bbox_list + (pred_bbox_list)))
|
152 |
+
return final_tracking_bbox_list + (pred_bbox_list)
|
153 |
+
|
154 |
+
|
155 |
+
def get_statistics(data_path, images_path, coco_eval_path, test_json_path, train_file_name="train.json", detectron_train_file_name = "boardetect_train_coco_format.json",iou_thresh=0.75, bacteria_tp_path="./bacteria-detections/", coco_file = "coco_instances_results.json",print_details=True, store_results_path = None):
|
156 |
+
# data_path = "./data_phase_nodes/"
|
157 |
+
# images_path = data_path + "images_cropped_resized/"
|
158 |
+
# coco_eval_path = "./coco_eval_phase_cropped/"
|
159 |
+
num_images = 0
|
160 |
+
tp_sum = 0
|
161 |
+
fp_sum = 0
|
162 |
+
fn_sum = 0
|
163 |
+
mse_image_counter =0
|
164 |
+
iou_image_counter =0
|
165 |
+
ratio_gt_p = 0
|
166 |
+
ratio_p_gt = 0
|
167 |
+
mse = 0
|
168 |
+
precision = 0
|
169 |
+
recall = 0
|
170 |
+
iou = 0
|
171 |
+
all_img_mse = []
|
172 |
+
for filename in sorted(os.listdir(images_path)):
|
173 |
+
# print(filename)
|
174 |
+
image_id = filename.split(".tif")[0]
|
175 |
+
#print(image_id)
|
176 |
+
train_file = json.load(open(data_path + train_file_name,'r'))
|
177 |
+
train_file_bbox_list = []
|
178 |
+
#print(train_file[int(image_id)]["file_name"])
|
179 |
+
if train_file[int(image_id)]["file_name"] == filename:
|
180 |
+
for i, annotation in enumerate(train_file[int(image_id)]["annotations"]):
|
181 |
+
train_file_bbox_list.append(train_file[int(image_id)]["annotations"][i]["bbox"])
|
182 |
+
# print("Number of Ground Truth boxes: ",len(train_file_bbox_list))
|
183 |
+
# print(train_file_bbox_list[0])
|
184 |
+
|
185 |
+
detectron_train_file = json.load(open(test_json_path + detectron_train_file_name,'r'))
|
186 |
+
detectron_train_file_bbox_list = []
|
187 |
+
|
188 |
+
for i,annotation in enumerate(detectron_train_file["annotations"]):
|
189 |
+
#print(detectron_train_file["annotations"][i]["image_id"])
|
190 |
+
#print(image_id)
|
191 |
+
#if detectron_train_file["images"]["annotations"][i]["image_id"] == int(image_id):
|
192 |
+
if detectron_train_file["annotations"][i]["image_id"] == int(image_id):
|
193 |
+
#print(true)
|
194 |
+
detectron_train_file_bbox_list.append(detectron_train_file["annotations"][i]["bbox"])
|
195 |
+
#annotation_image_id = detectron_train_file["annotations"][i]["image_id"]
|
196 |
+
#print(detectron_train_file["annotations"][i]["bbox"])
|
197 |
+
# print(len(detectron_train_file_bbox_list))
|
198 |
+
# print(detectron_train_file_bbox_list[0])
|
199 |
+
|
200 |
+
coco_results_file = json.load(open(coco_eval_path + coco_file,'r'))
|
201 |
+
coco_results_file_bbox_list = []
|
202 |
+
coco_results_file_bbox_scores = []
|
203 |
+
|
204 |
+
for i,annotation in enumerate(coco_results_file):
|
205 |
+
#print(coco_results_file[i])
|
206 |
+
#print(image_id)
|
207 |
+
if coco_results_file[i]["image_id"] == int(image_id):
|
208 |
+
#print(true)
|
209 |
+
coco_results_file_bbox_list.append(coco_results_file[i]["bbox"])
|
210 |
+
coco_results_file_bbox_scores.append(coco_results_file[i]["score"])
|
211 |
+
# print(coco_results_file[i]["bbox"])
|
212 |
+
# print(len(coco_results_file_bbox_list))
|
213 |
+
# print((coco_results_file_bbox_list))
|
214 |
+
#print(detectron_train_file["annotations"][i]["bbox"])
|
215 |
+
# print("Number of Predicted bounding boxes: ", len(coco_results_file_bbox_list))
|
216 |
+
# print(coco_results_file_bbox_list[0])
|
217 |
+
|
218 |
+
#coco_results_file_bbox_list = nms_predicted_bounding_boxes(coco_results_file_bbox_list, coco_results_file_bbox_scores ,iou_thresh_nms=0.001)
|
219 |
+
|
220 |
+
# Stat 1 - Ratio of boxes Predicted to Ground truth boxes
|
221 |
+
num_predicted_boxes = len(coco_results_file_bbox_list)
|
222 |
+
num_ground_truth_boxes = len(detectron_train_file_bbox_list)
|
223 |
+
|
224 |
+
if num_ground_truth_boxes>0:
|
225 |
+
|
226 |
+
#Ratio of GT to P per image
|
227 |
+
ratio_gt_p = ratio_gt_p + (num_ground_truth_boxes/ max(num_predicted_boxes,1))
|
228 |
+
ratio_p_gt = ratio_p_gt + (num_predicted_boxes / max(num_ground_truth_boxes,1))
|
229 |
+
num_images = num_images + 1
|
230 |
+
|
231 |
+
# Stat 2 - MSE of fiber intersections from the matching boxes
|
232 |
+
iou_mat = get_iou_mat_image(detectron_train_file_bbox_list, coco_results_file_bbox_list)
|
233 |
+
matching_boxes_iou = get_matching_boxes_iou(detectron_train_file_bbox_list, iou_mat, iou_thresh = iou_thresh)
|
234 |
+
mse_image, iou_image, mse_list = compute_mse_matching_boxes(matching_boxes_iou, detectron_train_file_bbox_list, coco_results_file_bbox_list)
|
235 |
+
if mse_image>=0: #if there are no matched boxes, mse is nan
|
236 |
+
mse = mse + mse_image
|
237 |
+
mse_image_counter += 1
|
238 |
+
if iou_image>=0: #if there are no matched boxes, iou is -1
|
239 |
+
iou += iou_image
|
240 |
+
iou_image_counter += 1
|
241 |
+
|
242 |
+
true_positive = len(matching_boxes_iou)
|
243 |
+
# print("num image: ", num_images)
|
244 |
+
# print("Num pred: ", num_predicted_boxes)
|
245 |
+
# print("num matched: ", len(matching_boxes_iou))
|
246 |
+
os.makedirs(bacteria_tp_path, exist_ok=True)
|
247 |
+
coord_file = open(bacteria_tp_path + image_id +".txt",'w')
|
248 |
+
coord_file.write(image_id)
|
249 |
+
coord_file.write(" ")
|
250 |
+
coord_file.write(str(true_positive))
|
251 |
+
coord_file.write("\n")
|
252 |
+
coord_file.close()
|
253 |
+
|
254 |
+
# OLD METHOD OF PRECISION RECALL CALCULATION
|
255 |
+
# assumes 0 for no tp
|
256 |
+
# precision is average of averages
|
257 |
+
# if true_positive>0:
|
258 |
+
# false_positive = num_predicted_boxes - true_positive
|
259 |
+
# false_negative = num_ground_truth_boxes - true_positive
|
260 |
+
|
261 |
+
# precision += true_positive/(true_positive + false_positive)
|
262 |
+
|
263 |
+
# recall += true_positive/(true_positive + false_negative)
|
264 |
+
all_img_mse.extend(mse_list)
|
265 |
+
|
266 |
+
# UPDATED METHOD
|
267 |
+
# precision is total tp / total (tp+fp) over all images, not the average of per-image precision
|
268 |
+
|
269 |
+
false_positive = num_predicted_boxes - true_positive
|
270 |
+
false_negative = num_ground_truth_boxes - true_positive
|
271 |
+
|
272 |
+
tp_sum = tp_sum + true_positive
|
273 |
+
fp_sum = fp_sum + false_positive
|
274 |
+
fn_sum = fn_sum + false_negative
|
275 |
+
|
276 |
+
|
277 |
+
|
278 |
+
|
279 |
+
# Stat 1 - Ratio of boxes Predicted to Ground truth boxes
|
280 |
+
avg_ratio_gt_p = ratio_gt_p/num_images
|
281 |
+
avg_ratio_p_gt = ratio_p_gt/num_images
|
282 |
+
|
283 |
+
|
284 |
+
# Stat 2 - MSE of fiber intersections from the matching boxes
|
285 |
+
try:
|
286 |
+
avg_mse = mse / mse_image_counter
|
287 |
+
except:
|
288 |
+
avg_mse = 0
|
289 |
+
|
290 |
+
|
291 |
+
try:
|
292 |
+
avg_iou = iou / iou_image_counter
|
293 |
+
except:
|
294 |
+
avg_iou = 0
|
295 |
+
|
296 |
+
try:
|
297 |
+
|
298 |
+
avg_prec = tp_sum / (tp_sum + fp_sum)
|
299 |
+
avg_recall = tp_sum / (tp_sum + fn_sum)
|
300 |
+
except:
|
301 |
+
avg_prec = 0
|
302 |
+
avg_recall = 0
|
303 |
+
|
304 |
+
if store_results_path:
|
305 |
+
result_file = open(store_results_path,'a+')
|
306 |
+
result_file.write(str(tp_sum))
|
307 |
+
result_file.write(",")
|
308 |
+
result_file.write(str(fp_sum))
|
309 |
+
result_file.write(",")
|
310 |
+
result_file.write(str(fn_sum))
|
311 |
+
result_file.write("\n")
|
312 |
+
result_file.close()
|
313 |
+
|
314 |
+
if print_details:
|
315 |
+
# ap = precision/num_images
|
316 |
+
# ar = recall/num_images
|
317 |
+
print("Average Ground Truth to Predicted Ratio: ", avg_ratio_gt_p)
|
318 |
+
print("Average Predicted to Ground Truth Ratio: ", avg_ratio_p_gt)
|
319 |
+
print("Average Mean Squared Error of fiber intersections: ", avg_mse)
|
320 |
+
print("Average IoU of TP boxes: ", avg_iou)
|
321 |
+
print("TP sum: ", tp_sum)
|
322 |
+
print("FP sum: ", fp_sum)
|
323 |
+
print("FN sum: ", fn_sum)
|
324 |
+
|
325 |
+
print("Average Precision: ", avg_prec)
|
326 |
+
print("Average Recall: ", avg_recall)
|
327 |
+
|
328 |
+
pyplot.figure(figsize=(10, 3))
|
329 |
+
sns.distplot(all_img_mse)
|
330 |
+
pyplot.xlabel("Nodal Errors (in px)")
|
331 |
+
# plt.xlabel("")
|
332 |
+
pyplot.grid("on", alpha = 0.3)
|
333 |
+
pyplot.show()
|
334 |
+
return avg_prec, avg_recall
|
335 |
+
|
336 |
+
video_map_path = args.video_map_path #"/alldata/medha/CleanCodeData/Data/videomap.txt"
|
337 |
+
video_map = open(video_map_path,'r',)
|
338 |
+
header = [x.strip() for x in (video_map.readline().strip()).split(",")]
|
339 |
+
video_num_id = header.index("video_num")
|
340 |
+
strain_id = header.index("strain")
|
341 |
+
strain_map = {}
|
342 |
+
for line in video_map.readlines():
|
343 |
+
line_details = [x.strip() for x in line.split(",")]
|
344 |
+
video_num = line_details[video_num_id]
|
345 |
+
strain = line_details[strain_id]
|
346 |
+
strain_map[video_num] = strain
|
347 |
+
strain_map[""] = "all"
|
348 |
+
|
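The video map read above is expected to be a comma-separated text file whose header names at least the video_num and strain columns; a minimal illustrative example (not the repository's sample file) looks like:

    video_num, strain
    1, strainA
    2, strainB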
349 |
+
#53 54 58 59 60 64 65 69 70 71 75 76 80 81 82 collagen
|
350 |
+
#85 24 30 35 83 84 agar0.2
|
351 |
+
#updated gar 19 22 29 30 83 84
|
352 |
+
#53 60 64 69 75 82
|
353 |
+
video_num = args.video
|
354 |
+
src_path = args.data_path #"/alldata/medha/CleanCodeData/DataFeatures/exp_collagen_train_veryhard/"
|
355 |
+
print(src_path)
|
356 |
+
video_path = f"data_video{video_num}_feature_optical_flow_median_back_2pyr_18win_background_img/"
|
357 |
+
data_path = os.path.join(src_path,video_path, "test/")
|
358 |
+
print(data_path)
|
359 |
+
images_path = data_path + "images/"
|
360 |
+
coco_eval_path = data_path
|
361 |
+
test_json_path = data_path
|
362 |
+
precision = {}
|
363 |
+
recall = {}
|
364 |
+
store_file_path_main = src_path + "test_set_results"
|
365 |
+
|
366 |
+
|
367 |
+
|
368 |
+
|
369 |
+
coco_file=args.filename #f"mosaic_coco.json"
|
370 |
+
train_file_name="test_All.json"
|
371 |
+
detectron_train_file_name = "boardetect_test_coco_format_All.json"
|
372 |
+
store_file_path = store_file_path_main + f"_trackmate.txt" if store_file_path_main else None
|
373 |
+
if store_file_path:
|
374 |
+
result_file = open(store_file_path,'a+')
|
375 |
+
result_file.write(str(video_num))
|
376 |
+
result_file.write(",")
|
377 |
+
result_file.close()
|
378 |
+
# visualize_gt_pred(data_path, images_path, coco_eval_path, test_json_path,
|
379 |
+
# train_file_name=train_file_name,
|
380 |
+
# detectron_train_file_name = detectron_train_file_name, coco_file=coco_file)
|
381 |
+
pr, rc = get_statistics(data_path, images_path, coco_eval_path, test_json_path,
|
382 |
+
train_file_name=train_file_name, detectron_train_file_name = detectron_train_file_name,
|
383 |
+
iou_thresh=0.1,coco_file=coco_file,print_details=False, store_results_path=store_file_path)
|
384 |
+
print("Precision: ", pr)
|
385 |
+
print("Recall: ", rc)
|
386 |
+
precision["Filter Track Length"] = pr
|
387 |
+
recall["Filter Track Length"] = rc
|
388 |
+
|
389 |
+
|
390 |
+
|
391 |
+
|
392 |
+
# In[24]:
|
393 |
+
|
394 |
+
|
395 |
+
precision
|
396 |
+
|
397 |
+
|
398 |
+
# In[25]:
|
399 |
+
|
400 |
+
|
401 |
+
recall
|
MEMTrack/src/inferenceBacteriaRetinanet_Motility.py
ADDED
@@ -0,0 +1,338 @@
1 |
+
# Some basic setup:
|
2 |
+
# Setup detectron2 logger
|
3 |
+
import detectron2
|
4 |
+
from detectron2.utils.logger import setup_logger
|
5 |
+
setup_logger()
|
6 |
+
|
7 |
+
# import some common libraries
|
8 |
+
import numpy as np
|
9 |
+
import os, json, cv2, random
|
10 |
+
import cv2
|
11 |
+
import pandas as pd
|
12 |
+
import shutil
|
13 |
+
import numpy as np
|
14 |
+
|
15 |
+
# import some common detectron2 utilities
|
16 |
+
from detectron2 import model_zoo
|
17 |
+
from detectron2.engine import DefaultPredictor
|
18 |
+
from detectron2.config import get_cfg
|
19 |
+
from detectron2.utils.visualizer import Visualizer
|
20 |
+
from detectron2.data import MetadataCatalog, DatasetCatalog
|
21 |
+
import json
|
22 |
+
from detectron2.structures import BoxMode
|
23 |
+
from detectron2.data import DatasetCatalog, MetadataCatalog
|
24 |
+
import argparse
|
25 |
+
from natsort import natsorted
|
26 |
+
import PIL
|
27 |
+
from PIL import Image
|
28 |
+
import matplotlib.pyplot as plt
|
29 |
+
from detectron2.engine import DefaultTrainer
|
30 |
+
from detectron2.evaluation import COCOEvaluator
|
31 |
+
from detectron2.config import get_cfg
|
32 |
+
import os
|
33 |
+
|
34 |
+
ap = argparse.ArgumentParser(description='Inference')
|
35 |
+
ap.add_argument('--video', default='29', type=str, metavar='PATH')
|
36 |
+
ap.add_argument('--source_path', default="./CombinationModel/data_feature_optical_flow_median_back_2pyr_18win_background_img/", type=str, metavar='PATH')
|
37 |
+
ap.add_argument('--output_dir', default="CombinationModel/easy-optical_flow_median_back_2pyr_18win_00125_34k", type=str, metavar='CELL PATH')
|
38 |
+
ap.add_argument('--test_dir', default="CombinationModel/easy-optical_flow_median_back_2pyr_18win_00125_34k", type=str, metavar='CELL PATH')
|
39 |
+
ap.add_argument('--annotations_train', default="Easy", type=str, metavar='TRAIN')
|
40 |
+
ap.add_argument('--annotations_test', default="Easy", type=str, metavar='TEST')
|
41 |
+
ap.add_argument('--epochs', default="90", type=str, metavar='TEST')
|
42 |
+
ap.add_argument('--lr', default="0.00125", type=str, metavar='TEST')
|
43 |
+
ap.add_argument('--custom_test_dir', type=str, metavar='CELL PATH')
|
44 |
+
ap.add_argument('--feature', default="images_feature/", type=str, metavar='FEATURE')
|
45 |
+
|
46 |
+
args = ap.parse_args()
|
47 |
+
print("lr: ", float(args.lr))
|
48 |
+
coco_format_train = pd.DataFrame(columns=["file_name","height","width","annotations"])
|
49 |
+
coco_format_train["annotations"] = coco_format_train["annotations"].astype('object')
|
50 |
+
|
51 |
+
video_num = args.video
|
52 |
+
|
53 |
+
#source = "./try_median_issue_data_feature_optical_flow_median_back_2pyr_18win_background_img/"
|
54 |
+
source = args.source_path
|
55 |
+
dest_train = source + "train/"
|
56 |
+
if args.custom_test_dir:
|
57 |
+
dest_test = args.custom_test_dir
|
58 |
+
else:
|
59 |
+
dest_test = args.test_dir + f"/data_video{video_num}_feature_optical_flow_median_back_2pyr_18win_background_img/test/"
|
60 |
+
#dest_test = f"./video{video_num}_feature_optical_flow_median_back_2pyr_18win/test/"
|
61 |
+
|
62 |
+
val = True
|
63 |
+
|
64 |
+
|
65 |
+
output_dir = ("try_median_issue_optical_flow_median_back_2pyr_18win_00125_34k_background_img")
|
66 |
+
#output_dir = ("./optical_flow_median_back/")
|
67 |
+
output_dir = args.output_dir
|
68 |
+
train_images_path = "/train" + args.feature #"/images_feature/"
|
69 |
+
val_image_path = "/val" + args.feature #"/images_feature/"
|
70 |
+
|
71 |
+
from contextlib import suppress
|
72 |
+
|
73 |
+
with suppress(OSError):
|
74 |
+
os.remove(output_dir +'/boardetect_test_coco_format.json.lock')
|
75 |
+
os.remove(output_dir +'/boardetect_test_coco_format.json')
|
76 |
+
os.remove(source + 'test.json')
|
77 |
+
|
78 |
+
#annotations_source = source + "annotation_easy_hard/"
|
79 |
+
images_source = source + args.feature #"/images_feature/"
|
80 |
+
|
81 |
+
if args.annotations_train == "Easy":
|
82 |
+
annotations_train = dest_train + "annotation_easy/"
|
83 |
+
elif args.annotations_train == "Hard":
|
84 |
+
annotations_train = dest_train + "annotation_hard/"
|
85 |
+
elif args.annotations_train == "VeryHard":
|
86 |
+
annotations_train = dest_train + "annotation_veryhard/"
|
87 |
+
elif args.annotations_train == "Easy+Hard":
|
88 |
+
annotations_train = dest_train + "annotation_easy_hard/"
|
89 |
+
elif args.annotations_train == "All":
|
90 |
+
annotations_train = dest_train + "annotation_easy_hard_veryhard/"
|
91 |
+
elif args.annotations_train == "Motility-low":
|
92 |
+
annotations_train = dest_train + "annotation_motility_low/"
|
93 |
+
elif args.annotations_train == "Motility-high":
|
94 |
+
annotations_train = dest_train + "annotation_motility_high/"
|
95 |
+
elif args.annotations_train == "Motility-wiggle":
|
96 |
+
annotations_train = dest_train + "annotation_motility_wiggle/"
|
97 |
+
elif args.annotations_train == "Motility-mid":
|
98 |
+
annotations_train = dest_train + "annotation_motility_mid/"
|
99 |
+
elif args.annotations_train == "Motility-motile":
|
100 |
+
annotations_train = dest_train + "annotation_motility_wiggle_mid_high/"
|
101 |
+
elif args.annotations_train == "Sticking-stick":
|
102 |
+
annotations_train = dest_train + "annotation_sticking_stick/"
|
103 |
+
elif args.annotations_train == "Sticking-motile":
|
104 |
+
annotations_train = dest_train + "annotation_sticking_motile/"
|
105 |
+
elif args.annotations_train == "Sticking-non_motile":
|
106 |
+
annotations_train = dest_train + "annotation_sticking_non_motile/"
|
107 |
+
elif args.annotations_train == "Motility-low-wiggle":
|
108 |
+
annotations_train = dest_train + "annotation_motility_low_wiggle/"
|
109 |
+
elif args.annotations_train == "Motility-mid-high":
|
110 |
+
annotations_train = dest_train + "annotation_motility_mid_high/"
|
111 |
+
|
112 |
+
if args.annotations_test == "Easy":
|
113 |
+
annotations_test = dest_test + "annotation_easy/"
|
114 |
+
elif args.annotations_test == "Hard":
|
115 |
+
annotations_test = dest_test + "annotation_hard/"
|
116 |
+
elif args.annotations_test == "VeryHard":
|
117 |
+
annotations_test = dest_test + "annotation_veryhard/"
|
118 |
+
elif args.annotations_test == "Easy+Hard":
|
119 |
+
annotations_test = dest_test + "annotation_easy_hard/"
|
120 |
+
elif args.annotations_test == "All":
|
121 |
+
annotations_test = dest_test + "annotation_easy_hard_veryhard/"
|
122 |
+
elif args.annotations_test == "Motility-low":
|
123 |
+
annotations_test = dest_test + "annotation_motility_low/"
|
124 |
+
elif args.annotations_test == "Motility-high":
|
125 |
+
annotations_test = dest_test + "annotation_motility_high/"
|
126 |
+
elif args.annotations_test == "Motility-wiggle":
|
127 |
+
annotations_test = dest_test + "annotation_motility_wiggle/"
|
128 |
+
elif args.annotations_test == "Motility-mid":
|
129 |
+
annotations_test = dest_test + "annotation_motility_mid/"
|
130 |
+
elif args.annotations_test == "Motility-motile":
|
131 |
+
annotations_test = dest_test + "annotation_motility_wiggle_mid_high/"
|
132 |
+
elif args.annotations_test == "Sticking-stick":
|
133 |
+
annotations_test = dest_test + "annotation_sticking_stick/"
|
134 |
+
elif args.annotations_test == "Sticking-motile":
|
135 |
+
annotations_test = dest_test + "annotation_sticking_motile/"
|
136 |
+
elif args.annotations_test == "Sticking-non_motile":
|
137 |
+
annotations_test = dest_test + "annotation_sticking_non_motile/"
|
138 |
+
elif args.annotations_test == "Motility-low-wiggle":
|
139 |
+
annotations_test = dest_test + "annotation_motility_low_wiggle/"
|
140 |
+
elif args.annotations_test == "Motility-mid-high":
|
141 |
+
annotations_test = dest_test + "annotation_motility_mid_high/"
|
142 |
+
|
143 |
+
|
144 |
+
|
145 |
+
#To test a particular bacteria
|
146 |
+
#annotations_test = dest_test + "bacteria/4/xy_coord/"
|
147 |
+
images_train = dest_train + args.feature #"/images_feature/"
|
148 |
+
images_test = dest_test + args.feature #"/images_feature/"
|
149 |
+
test_image_path = images_test
|
150 |
+
|
151 |
+
factor_w = 1#1024/1388
|
152 |
+
factor_h = 1#1024/1040
|
153 |
+
|
154 |
+
#function to get background frame
|
155 |
+
#function to get prev frame
|
156 |
+
#function to create new image given image num
|
157 |
+
|
158 |
+
for txt_file in natsorted(os.listdir(annotations_train)):
|
159 |
+
width = 31
|
160 |
+
text_file = open(annotations_train + txt_file, 'r')
|
161 |
+
xy_coords = text_file.readlines()
|
162 |
+
boxes = []
|
163 |
+
res=pd.DataFrame(columns=["file_name","height","width","annotations"])
|
164 |
+
image = PIL.Image.open(images_train + txt_file[:-4] + ".tif").convert('L')
|
165 |
+
image_feature = PIL.Image.open(images_train + txt_file[:-4] + ".tif")
|
166 |
+
image = image_feature
|
167 |
+
#print(image.size)
|
168 |
+
res.at[0,"height"] = image.height
|
169 |
+
res.at[0,"width"] = image.width
|
170 |
+
res.at[0,"file_name"] = txt_file[:-4]+".tif"
|
171 |
+
bbox_mode = 0
|
172 |
+
category_id = 0
|
173 |
+
# image2 = image.resize((1024,1024))
|
174 |
+
# image2.save(images_resized_train + txt_file[:-4] + ".jpg")
|
175 |
+
for xy in xy_coords:
|
176 |
+
box = []
|
177 |
+
x = float(xy.split(" ")[0])
|
178 |
+
y = float(xy.split(" ")[1])
|
179 |
+
x1 = int(x*factor_w - (width // 2))
|
180 |
+
y1 = int(y*factor_h - (width // 2))
|
181 |
+
x2 = int(x*factor_w + (width // 2))
|
182 |
+
y2 = int(y*factor_h + (width // 2))
|
183 |
+
w = h = 31
|
184 |
+
box = [x1, y1, x2, y2]
|
185 |
+
boxes.append(np.array(box))
|
186 |
+
#print(np.array(box))
|
187 |
+
|
188 |
+
res["annotations"]=res["annotations"].astype('object')
|
189 |
+
annotation_df = pd.DataFrame(columns=["bbox","bbox_mode","category_id"])
|
190 |
+
annotation_df["bbox"] = boxes
|
191 |
+
annotation_df["bbox_mode"] = bbox_mode
|
192 |
+
annotation_df["category_id"] = category_id
|
193 |
+
annotations = annotation_df.T.to_dict().values()
|
194 |
+
l = []
|
195 |
+
for j in annotations:
|
196 |
+
l.append(j)
|
197 |
+
res.at[0,"annotations"] = l
|
198 |
+
coco_format_train = coco_format_train.append(res)
|
199 |
+
coco_format_train.reset_index(drop=True,inplace=True)
|
200 |
+
|
201 |
+
coco_format_train.reset_index(inplace=True)
|
202 |
+
coco_format_train.rename(columns={"index":"image_id"},inplace=True)
|
203 |
+
coco_format_train.to_json(source + "train.json",orient="records")
|
204 |
+
|
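Each annotation txt file holds one "x y" point per bacterium, and the loop above expands every point into a fixed 31 px square box centred on it (factor_w and factor_h are both 1 here). A minimal sketch of that conversion with illustrative values:

    width = 31
    x, y = 200.0, 150.0                                  # point annotation from the txt file
    x1, y1 = int(x - width // 2), int(y - width // 2)    # 185, 135
    x2, y2 = int(x + width // 2), int(y + width // 2)    # 215, 165
    box = [x1, y1, x2, y2]                               # XYXY box stored in train.json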
205 |
+
coco_format_test = pd.DataFrame(columns=["file_name","height","width","annotations"])
|
206 |
+
coco_format_test["annotations"] = coco_format_test["annotations"].astype('object')
|
207 |
+
|
208 |
+
for txt_file in natsorted(os.listdir(annotations_test)):
|
209 |
+
width = 31
|
210 |
+
text_file = open(annotations_test + txt_file, 'r')
|
211 |
+
xy_coords = text_file.readlines()
|
212 |
+
boxes = []
|
213 |
+
res=pd.DataFrame(columns=["file_name","height","width","annotations"])
|
214 |
+
image = PIL.Image.open(images_test + txt_file[:-4] + ".tif").convert('L')
|
215 |
+
image_feature = PIL.Image.open(images_test + txt_file[:-4] + ".tif")
|
216 |
+
image = image_feature
|
217 |
+
#print(image.size)
|
218 |
+
res.at[0,"height"] = image.height
|
219 |
+
res.at[0,"width"] = image.width
|
220 |
+
res.at[0,"file_name"] = txt_file[:-4]+".tif"
|
221 |
+
bbox_mode = 0
|
222 |
+
category_id = 0
|
223 |
+
# image2 = image.resize((1024,1024))
|
224 |
+
# image2.save(images_resized_train + txt_file[:-4] + ".jpg")
|
225 |
+
for xy in xy_coords:
|
226 |
+
box = []
|
227 |
+
x = float(xy.split(" ")[0])
|
228 |
+
y = float(xy.split(" ")[1])
|
229 |
+
x1 = int(x*factor_w - (width // 2))
|
230 |
+
y1 = int(y*factor_h - (width // 2))
|
231 |
+
x2 = int(x*factor_w + (width // 2))
|
232 |
+
y2 = int(y*factor_h + (width // 2))
|
233 |
+
w = h = 31
|
234 |
+
box = [x1, y1, x2, y2]
|
235 |
+
boxes.append(np.array(box))
|
236 |
+
#print(np.array(box))
|
237 |
+
|
238 |
+
res["annotations"]=res["annotations"].astype('object')
|
239 |
+
annotation_df = pd.DataFrame(columns=["bbox","bbox_mode","category_id"])
|
240 |
+
annotation_df["bbox"] = boxes
|
241 |
+
annotation_df["bbox_mode"] = bbox_mode
|
242 |
+
annotation_df["category_id"] = category_id
|
243 |
+
annotations = annotation_df.T.to_dict().values()
|
244 |
+
l = []
|
245 |
+
for j in annotations:
|
246 |
+
l.append(j)
|
247 |
+
res.at[0,"annotations"] = l
|
248 |
+
coco_format_test = coco_format_test.append(res)
|
249 |
+
coco_format_test.reset_index(drop=True,inplace=True)
|
250 |
+
|
251 |
+
coco_format_test.reset_index(inplace=True)
|
252 |
+
coco_format_test.rename(columns={"index":"image_id"},inplace=True)
|
253 |
+
coco_format_test.to_json(source + "test.json",orient="records")
|
254 |
+
|
255 |
+
|
256 |
+
|
257 |
+
def get_board_dicts(imgdir, mode):
|
258 |
+
if mode == 'train':
|
259 |
+
json_file = imgdir+"/train.json" #Fetch the json file
|
260 |
+
if mode == 'test':
|
261 |
+
json_file = imgdir+"/test.json" #Fetch the json file
|
262 |
+
with open(json_file) as f:
|
263 |
+
dataset_dicts = json.load(f)
|
264 |
+
for i in dataset_dicts:
|
265 |
+
filename = i["file_name"]
|
266 |
+
if mode == 'train':
|
267 |
+
i["file_name"] = imgdir + train_images_path + filename
|
268 |
+
if mode == 'test':
|
269 |
+
i["file_name"] = test_image_path + filename
|
270 |
+
for j in i["annotations"]:
|
271 |
+
j["bbox_mode"] = BoxMode.XYXY_ABS #Setting the required Box Mode
|
272 |
+
j["category_id"] = int(j["category_id"])
|
273 |
+
return dataset_dicts
|
274 |
+
|
275 |
+
#Registering the Dataset
|
276 |
+
for d in ["train", "test"]:
|
277 |
+
DatasetCatalog.register("boardetect_" + d, lambda d=d: get_board_dicts(source, d))
|
278 |
+
MetadataCatalog.get("boardetect_" + d).set(thing_classes=["node"])
|
279 |
+
board_metadata = MetadataCatalog.get("boardetect_train")
|
280 |
+
val_metadata = MetadataCatalog.get("boardetect_test")
|
281 |
+
|
282 |
+
train_data = ("boardetect_train",)
|
283 |
+
test_data = ("boardetect_test",)
|
284 |
+
if val ==True:
|
285 |
+
val_data = ("boardetect_test",)
|
286 |
+
else:
|
287 |
+
val_data = ("boardetect_train",)
|
288 |
+
|
289 |
+
|
290 |
+
|
291 |
+
class CocoTrainer(DefaultTrainer):
|
292 |
+
|
293 |
+
@classmethod
|
294 |
+
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
|
295 |
+
if output_folder is None:
|
296 |
+
os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
|
297 |
+
output_folder = cfg.OUTPUT_DIR
|
298 |
+
|
299 |
+
|
300 |
+
return COCOEvaluator(dataset_name, cfg, False, output_folder)
|
301 |
+
|
302 |
+
cfg = get_cfg()
|
303 |
+
cfg.MODEL.DEVICE = 'cuda:0'
|
304 |
+
cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/retinanet_R_101_FPN_3x.yaml")) #Get the basic model configuration from the model zoo
|
305 |
+
#Passing the Train and Validation sets
|
306 |
+
cfg.DATASETS.TRAIN = train_data #("boardetect_train",)
|
307 |
+
cfg.DATASETS.TEST = test_data #("boardetect_train",)
|
308 |
+
cfg.OUTPUT_DIR = output_dir #("comparison-optical-flow")
|
309 |
+
# Number of data loading threads
|
310 |
+
cfg.DATALOADER.NUM_WORKERS = 4
|
311 |
+
#cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml") # Let training initialize from model zoo
|
312 |
+
#cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth") #uncommwnt during inference
|
313 |
+
cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth") #uncommwnt during inference
|
314 |
+
#cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_0029999.pth") #uncommwnt during inference
|
315 |
+
# Number of images per batch across all machines.
|
316 |
+
cfg.SOLVER.IMS_PER_BATCH = 4
|
317 |
+
cfg.SOLVER.BASE_LR = float(args.lr) #0.0125 # pick a good LearningRate
|
318 |
+
cfg.SOLVER.MAX_ITER = int(args.epochs) #30000 #No. of iterations
|
319 |
+
# cfg.SOLVER.STEPS = (300,600)
|
320 |
+
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 80
|
321 |
+
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
|
322 |
+
cfg.TEST.EVAL_PERIOD = 500 # No. of iterations after which the Validation Set is evaluated.
|
323 |
+
cfg.TEST.DETECTIONS_PER_IMAGE = 60
|
324 |
+
cfg.SOLVER.CHECKPOINT_PERIOD = 500
|
325 |
+
cfg.VIS_PERIOD = 500
|
326 |
+
cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN = 100
|
327 |
+
cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST = 100
|
328 |
+
cfg.MODEL.RPN.POST_NMS_TOPK_TRAIN = 80 #80
|
329 |
+
cfg.MODEL.RPN.POST_NMS_TOPK_TEST = 80
|
330 |
+
os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
|
331 |
+
trainer = CocoTrainer(cfg)
|
332 |
+
trainer.resume_or_load(resume=True) #resume=True for inference
|
333 |
+
trainer.train()
|
334 |
+
print(cfg.MODEL.WEIGHTS)
|
335 |
+
|
336 |
+
os.system(f'cp {output_dir}/coco_instances_results.json {dest_test}/coco_instances_results_{args.annotations_test}.json')
|
337 |
+
os.system(f'cp {output_dir}/boardetect_test_coco_format.json {dest_test}/boardetect_test_coco_format_{args.annotations_test}.json')
|
338 |
+
os.system(f'cp {source}/test.json {dest_test}/test_{args.annotations_test}.json')
|
MEMTrack/src/inferenceBacteriaRetinanet_Motility_v2.py
ADDED
@@ -0,0 +1,230 @@
1 |
+
# Some basic setup:
|
2 |
+
# Setup detectron2 logger
|
3 |
+
import detectron2
|
4 |
+
from detectron2.utils.logger import setup_logger
|
5 |
+
setup_logger()
|
6 |
+
|
7 |
+
# import some common libraries
|
8 |
+
import numpy as np
|
9 |
+
import os, json, cv2, random
|
10 |
+
import cv2
|
11 |
+
import pandas as pd
|
12 |
+
import shutil
|
13 |
+
import numpy as np
|
14 |
+
|
15 |
+
# import some common detectron2 utilities
|
16 |
+
from detectron2 import model_zoo
|
17 |
+
from detectron2.engine import DefaultPredictor
|
18 |
+
from detectron2.config import get_cfg
|
19 |
+
from detectron2.utils.visualizer import Visualizer
|
20 |
+
from detectron2.data import MetadataCatalog, DatasetCatalog
|
21 |
+
import json
|
22 |
+
from detectron2.structures import BoxMode
|
23 |
+
from detectron2.data import DatasetCatalog, MetadataCatalog
|
24 |
+
import argparse
|
25 |
+
from natsort import natsorted
|
26 |
+
import PIL
|
27 |
+
from PIL import Image
|
28 |
+
import matplotlib.pyplot as plt
|
29 |
+
from detectron2.engine import DefaultTrainer
|
30 |
+
from detectron2.evaluation import COCOEvaluator
|
31 |
+
from detectron2.config import get_cfg
|
32 |
+
from detectron2.evaluation import inference_on_dataset
|
33 |
+
from detectron2.data import build_detection_test_loader
|
34 |
+
import os
|
35 |
+
from contextlib import suppress
|
36 |
+
|
37 |
+
|
38 |
+
def register_dataset_(annotations_test, images_test, dest_test):
|
39 |
+
factor_w = 1#1024/1388
|
40 |
+
factor_h = 1#1024/1040
|
41 |
+
|
42 |
+
coco_format_test = pd.DataFrame(columns=["file_name","height","width","annotations"])
|
43 |
+
coco_format_test["annotations"] = coco_format_test["annotations"].astype('object')
|
44 |
+
|
45 |
+
|
46 |
+
for txt_file in natsorted(os.listdir(annotations_test)):
|
47 |
+
width = 31
|
48 |
+
text_file = open(annotations_test + txt_file, 'r')
|
49 |
+
xy_coords = text_file.readlines()
|
50 |
+
boxes = []
|
51 |
+
res=pd.DataFrame(columns=["file_name","height","width","annotations"])
|
52 |
+
image = PIL.Image.open(images_test + txt_file[:-4] + ".tif").convert('L')
|
53 |
+
image_feature = PIL.Image.open(images_test + txt_file[:-4] + ".tif")
|
54 |
+
image = image_feature
|
55 |
+
#print(image.size)
|
56 |
+
res.at[0,"height"] = image.height
|
57 |
+
res.at[0,"width"] = image.width
|
58 |
+
res.at[0,"file_name"] = txt_file[:-4]+".tif"
|
59 |
+
bbox_mode = 0
|
60 |
+
category_id = 0
|
61 |
+
# image2 = image.resize((1024,1024))
|
62 |
+
# image2.save(images_resized_train + txt_file[:-4] + ".jpg")
|
63 |
+
for xy in xy_coords:
|
64 |
+
box = []
|
65 |
+
x = float(xy.split(" ")[0])
|
66 |
+
y = float(xy.split(" ")[1])
|
67 |
+
x1 = int(x*factor_w - (width // 2))
|
68 |
+
y1 = int(y*factor_h - (width // 2))
|
69 |
+
x2 = int(x*factor_w + (width // 2))
|
70 |
+
y2 = int(y*factor_h + (width // 2))
|
71 |
+
w = h = 31
|
72 |
+
box = [x1, y1, x2, y2]
|
73 |
+
boxes.append(np.array(box))
|
74 |
+
#print(np.array(box))
|
75 |
+
|
76 |
+
res["annotations"]=res["annotations"].astype('object')
|
77 |
+
annotation_df = pd.DataFrame(columns=["bbox","bbox_mode","category_id"])
|
78 |
+
annotation_df["bbox"] = boxes
|
79 |
+
annotation_df["bbox_mode"] = bbox_mode
|
80 |
+
annotation_df["category_id"] = category_id
|
81 |
+
annotations = annotation_df.T.to_dict().values()
|
82 |
+
l = []
|
83 |
+
for j in annotations:
|
84 |
+
l.append(j)
|
85 |
+
res.at[0,"annotations"] = l
|
86 |
+
coco_format_test = coco_format_test.append(res)
|
87 |
+
coco_format_test.reset_index(drop=True,inplace=True)
|
88 |
+
|
89 |
+
coco_format_test.reset_index(inplace=True)
|
90 |
+
coco_format_test.rename(columns={"index":"image_id"},inplace=True)
|
91 |
+
coco_format_test.to_json(dest_test + "test.json",orient="records")
|
92 |
+
|
93 |
+
|
94 |
+
def get_board_dicts(imgdir, mode):
|
95 |
+
if mode == 'test':
|
96 |
+
json_file = imgdir+"/test.json" #Fetch the json file
|
97 |
+
with open(json_file) as f:
|
98 |
+
dataset_dicts = json.load(f)
|
99 |
+
for i in dataset_dicts:
|
100 |
+
filename = i["file_name"]
|
101 |
+
if mode == 'test':
|
102 |
+
i["file_name"] = images_test + filename
|
103 |
+
for j in i["annotations"]:
|
104 |
+
j["bbox_mode"] = BoxMode.XYXY_ABS #Setting the required Box Mode
|
105 |
+
j["category_id"] = int(j["category_id"])
|
106 |
+
return dataset_dicts
|
107 |
+
|
108 |
+
#Registering the Dataset
|
109 |
+
for d in ["test"]:
|
110 |
+
DatasetCatalog.register("boardetect_" + d, lambda d=d: get_board_dicts(dest_test, d))
|
111 |
+
MetadataCatalog.get("boardetect_" + d).set(thing_classes=["node"])
|
112 |
+
|
113 |
+
test_metadata = MetadataCatalog.get("boardetect_test")
|
114 |
+
|
115 |
+
test_data = ("boardetect_test",)
|
116 |
+
return
|
117 |
+
|
118 |
+
def run_inference(video_num, output_dir, feature="images_feature/", annotations_test="Easy", test_dir=None, custom_test_dir=None, register_dataset=True):
|
119 |
+
annotations_test_args = annotations_test
|
120 |
+
if custom_test_dir:
|
121 |
+
dest_test = custom_test_dir
|
122 |
+
else:
|
123 |
+
dest_test = test_dir + f"/data_video{video_num}_feature_optical_flow_median_back_2pyr_18win_background_img/test/"
|
124 |
+
|
125 |
+
val_image_path = "/val" + feature
|
126 |
+
|
127 |
+
if os.path.exists(output_dir +'/boardetect_test_coco_format.json.lock'):
|
128 |
+
os.remove(output_dir +'/boardetect_test_coco_format.json.lock')
|
129 |
+
if os.path.exists(output_dir +'/boardetect_test_coco_format.json'):
|
130 |
+
os.remove(output_dir +'/boardetect_test_coco_format.json')
|
131 |
+
if os.path.exists(output_dir +'/coco_instances_results.json'):
|
132 |
+
os.remove(output_dir +'/coco_instances_results.json')
|
133 |
+
# if os.path.exists(dest_test + 'test.json'):
|
134 |
+
# os.remove(dest_test + 'test.json')
|
135 |
+
|
136 |
+
print("output_dir: ", output_dir)
|
137 |
+
print("dest_test: ", dest_test)
|
138 |
+
|
139 |
+
|
140 |
+
|
141 |
+
if annotations_test_args == "Easy":
|
142 |
+
annotations_test = dest_test + "annotation_easy/"
|
143 |
+
elif annotations_test_args == "Hard":
|
144 |
+
annotations_test = dest_test + "annotation_hard/"
|
145 |
+
elif annotations_test_args == "VeryHard":
|
146 |
+
annotations_test = dest_test + "annotation_veryhard/"
|
147 |
+
elif annotations_test_args == "Easy+Hard":
|
148 |
+
annotations_test = dest_test + "annotation_easy_hard/"
|
149 |
+
elif annotations_test_args == "All":
|
150 |
+
annotations_test = dest_test + "annotation_easy_hard_veryhard/"
|
151 |
+
elif annotations_test_args == "Motility-low":
|
152 |
+
annotations_test = dest_test + "annotation_motility_low/"
|
153 |
+
elif annotations_test_args == "Motility-high":
|
154 |
+
annotations_test = dest_test + "annotation_motility_high/"
|
155 |
+
elif annotations_test_args == "Motility-wiggle":
|
156 |
+
annotations_test = dest_test + "annotation_motility_wiggle/"
|
157 |
+
elif annotations_test_args == "Motility-mid":
|
158 |
+
annotations_test = dest_test + "annotation_motility_mid/"
|
159 |
+
elif annotations_test_args == "Motility-motile":
|
160 |
+
annotations_test = dest_test + "annotation_motility_wiggle_mid_high/"
|
161 |
+
elif annotations_test_args == "Sticking-stick":
|
162 |
+
annotations_test = dest_test + "annotation_sticking_stick/"
|
163 |
+
elif annotations_test_args == "Sticking-motile":
|
164 |
+
annotations_test = dest_test + "annotation_sticking_motile/"
|
165 |
+
elif annotations_test_args == "Sticking-non_motile":
|
166 |
+
annotations_test = dest_test + "annotation_sticking_non_motile/"
|
167 |
+
elif annotations_test_args == "Motility-low-wiggle":
|
168 |
+
annotations_test = dest_test + "annotation_motility_low_wiggle/"
|
169 |
+
elif annotations_test_args == "Motility-mid-high":
|
170 |
+
annotations_test = dest_test + "annotation_motility_mid_high/"
|
171 |
+
|
172 |
+
|
173 |
+
#To test a particular bacteria
|
174 |
+
images_test = dest_test + feature #"/images_feature/"
|
175 |
+
test_image_path = images_test
|
176 |
+
|
177 |
+
if register_dataset:
|
178 |
+
register_dataset_(annotations_test, images_test, dest_test)
|
179 |
+
|
180 |
+
cfg = get_cfg()
|
181 |
+
cfg.MODEL.DEVICE = 'cpu'
|
182 |
+
cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/retinanet_R_101_FPN_3x.yaml")) #Get the basic model configuration from the model zoo
|
183 |
+
cfg.DATASETS.TEST = ("boardetect_test",) #("boardetect_train",)
|
184 |
+
cfg.OUTPUT_DIR = output_dir #("comparison-optical-flow")
|
185 |
+
# Number of data loading threads
|
186 |
+
cfg.DATALOADER.NUM_WORKERS = 4
|
187 |
+
cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_0089999.pth") #uncommwnt during inference
|
188 |
+
#cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_0029999.pth") #uncommwnt during inference
|
189 |
+
# Number of images per batch across all machines.
|
190 |
+
cfg.SOLVER.IMS_PER_BATCH = 8
|
191 |
+
# cfg.SOLVER.STEPS = (300,600)
|
192 |
+
cfg.TEST.DETECTIONS_PER_IMAGE = 60
|
193 |
+
os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
|
194 |
+
print(cfg.MODEL.WEIGHTS)
|
195 |
+
|
196 |
+
predictor = DefaultPredictor(cfg)
|
197 |
+
evaluator = COCOEvaluator("boardetect_test", cfg, False, output_dir)
|
198 |
+
val_loader = build_detection_test_loader(cfg, "boardetect_test")
|
199 |
+
print(inference_on_dataset(predictor.model, val_loader, evaluator))
|
200 |
+
|
201 |
+
if os.path.exists(os.path.join(output_dir, "coco_instances_results.json")):
|
202 |
+
shutil.copy(os.path.join(output_dir, "coco_instances_results.json"), os.path.join(dest_test, f"coco_instances_results_{annotations_test_args}.json"))
|
203 |
+
|
204 |
+
if os.path.exists(os.path.join(output_dir, "boardetect_test_coco_format.json")):
|
205 |
+
shutil.copy(os.path.join(output_dir, "boardetect_test_coco_format.json"), os.path.join(dest_test, f"boardetect_test_coco_format_{annotations_test_args}.json"))
|
206 |
+
|
207 |
+
if os.path.exists(os.path.join(dest_test, "test.json")):
|
208 |
+
shutil.copy(os.path.join(dest_test, "test.json"), os.path.join(dest_test, f"test_{annotations_test_args}.json"))
|
209 |
+
return ("Inference done")
|
210 |
+
|
211 |
+
if __name__ == "__main__":
|
212 |
+
|
213 |
+
ap = argparse.ArgumentParser(description='Inference')
|
214 |
+
ap.add_argument('--video', default='1', type=str, metavar='PATH')
|
215 |
+
ap.add_argument('--output_dir', type=str, metavar='CELL PATH')
|
216 |
+
ap.add_argument('--test_dir', type=str, metavar='CELL PATH')
|
217 |
+
ap.add_argument('--annotations_test', default="Easy", type=str, metavar='TEST')
|
218 |
+
ap.add_argument('--custom_test_dir', type=str, metavar='CELL PATH')
|
219 |
+
ap.add_argument('--feature', default="images_feature/", type=str, metavar='FEATURE')
|
220 |
+
|
221 |
+
args = ap.parse_args()
|
222 |
+
video_num = args.video
|
223 |
+
test_dir = args.test_dir
|
224 |
+
custom_test_dir = args.custom_test_dir
|
225 |
+
output_dir = args.output_dir
|
226 |
+
feature = args.feature
|
227 |
+
annotations_test = args.annotations_test
|
228 |
+
|
229 |
+
run_inference(video_num, output_dir, feature, annotations_test, test_dir, custom_test_dir)
|
230 |
+
|
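For reference, the standalone entry point above can be invoked with the flags it defines; the paths below are placeholders, not files shipped with the repository:

    python MEMTrack/src/inferenceBacteriaRetinanet_Motility_v2.py \
        --video 1 \
        --output_dir models/motility_model \
        --test_dir DataFeatures/exp_sample \
        --annotations_test All \
        --feature images_feature/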
MEMTrack/src/sort.py
ADDED
@@ -0,0 +1,332 @@
1 |
+
"""
|
2 |
+
SORT: A Simple, Online and Realtime Tracker
|
3 |
+
Copyright (C) 2016-2020 Alex Bewley [email protected]
|
4 |
+
|
5 |
+
This program is free software: you can redistribute it and/or modify
|
6 |
+
it under the terms of the GNU General Public License as published by
|
7 |
+
the Free Software Foundation, either version 3 of the License, or
|
8 |
+
(at your option) any later version.
|
9 |
+
|
10 |
+
This program is distributed in the hope that it will be useful,
|
11 |
+
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
12 |
+
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
13 |
+
GNU General Public License for more details.
|
14 |
+
|
15 |
+
You should have received a copy of the GNU General Public License
|
16 |
+
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
17 |
+
"""
|
18 |
+
from __future__ import print_function
|
19 |
+
|
20 |
+
import os
|
21 |
+
import numpy as np
|
22 |
+
import matplotlib
|
23 |
+
#matplotlib.use('TkAgg')
|
24 |
+
import matplotlib.pyplot as plt
|
25 |
+
import matplotlib.patches as patches
|
26 |
+
from skimage import io
|
27 |
+
|
28 |
+
import glob
|
29 |
+
import time
|
30 |
+
import argparse
|
31 |
+
from filterpy.kalman import KalmanFilter
|
32 |
+
|
33 |
+
np.random.seed(0)
|
34 |
+
|
35 |
+
|
36 |
+
def linear_assignment(cost_matrix):
|
37 |
+
try:
|
38 |
+
import lap
|
39 |
+
_, x, y = lap.lapjv(cost_matrix, extend_cost=True)
|
40 |
+
return np.array([[y[i],i] for i in x if i >= 0]) #
|
41 |
+
except ImportError:
|
42 |
+
from scipy.optimize import linear_sum_assignment
|
43 |
+
x, y = linear_sum_assignment(cost_matrix)
|
44 |
+
return np.array(list(zip(x, y)))
|
45 |
+
|
46 |
+
|
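A small illustration of the assignment helper above (SORT later calls it with the negated IoU matrix, so a lower cost means a better overlap); the matrix values are illustrative only:

    cost = np.array([[0.1, 0.9],
                     [0.8, 0.2]])
    print(linear_assignment(cost))   # [[0 0], [1 1]]: row 0 -> column 0, row 1 -> column 1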
47 |
+
def iou_batch(bb_test, bb_gt):
|
48 |
+
"""
|
49 |
+
From SORT: Computes IOU between two bboxes in the form [x1,y1,x2,y2]
|
50 |
+
"""
|
51 |
+
bb_gt = np.expand_dims(bb_gt, 0)
|
52 |
+
bb_test = np.expand_dims(bb_test, 1)
|
53 |
+
|
54 |
+
xx1 = np.maximum(bb_test[..., 0], bb_gt[..., 0])
|
55 |
+
yy1 = np.maximum(bb_test[..., 1], bb_gt[..., 1])
|
56 |
+
xx2 = np.minimum(bb_test[..., 2], bb_gt[..., 2])
|
57 |
+
yy2 = np.minimum(bb_test[..., 3], bb_gt[..., 3])
|
58 |
+
w = np.maximum(0., xx2 - xx1)
|
59 |
+
h = np.maximum(0., yy2 - yy1)
|
60 |
+
wh = w * h
|
61 |
+
o = wh / ((bb_test[..., 2] - bb_test[..., 0]) * (bb_test[..., 3] - bb_test[..., 1])
|
62 |
+
+ (bb_gt[..., 2] - bb_gt[..., 0]) * (bb_gt[..., 3] - bb_gt[..., 1]) - wh)
|
63 |
+
return(o)
|
64 |
+
|
65 |
+
|
66 |
+
def convert_bbox_to_z(bbox):
|
67 |
+
"""
|
68 |
+
Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form
|
69 |
+
[x,y,s,r] where x,y is the centre of the box and s is the scale/area and r is
|
70 |
+
the aspect ratio
|
71 |
+
"""
|
72 |
+
w = bbox[2] - bbox[0]
|
73 |
+
h = bbox[3] - bbox[1]
|
74 |
+
x = bbox[0] + w/2.
|
75 |
+
y = bbox[1] + h/2.
|
76 |
+
s = w * h #scale is just area
|
77 |
+
r = w / float(h)
|
78 |
+
return np.array([x, y, s, r]).reshape((4, 1))
|
79 |
+
|
80 |
+
|
81 |
+
def convert_x_to_bbox(x,score=None):
|
82 |
+
"""
|
83 |
+
Takes a bounding box in the centre form [x,y,s,r] and returns it in the form
|
84 |
+
[x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right
|
85 |
+
"""
|
86 |
+
w = np.sqrt(x[2] * x[3])
|
87 |
+
h = x[2] / w
|
88 |
+
if(score==None):
|
89 |
+
return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.]).reshape((1,4))
|
90 |
+
else:
|
91 |
+
return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.,score]).reshape((1,5))
|
92 |
+
|
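A round-trip sanity check for the two converters above, using an illustrative 31x31 box:

    z = convert_bbox_to_z([100, 100, 131, 131])
    print(z.ravel())              # [115.5, 115.5, 961.0, 1.0]: centre, area, aspect ratio
    print(convert_x_to_bbox(z))   # [[100. 100. 131. 131.]]: back to [x1, y1, x2, y2]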
93 |
+
|
94 |
+
class KalmanBoxTracker(object):
|
95 |
+
"""
|
96 |
+
This class represents the internal state of individual tracked objects observed as bbox.
|
97 |
+
"""
|
98 |
+
count = 0
|
99 |
+
def __init__(self,bbox):
|
100 |
+
"""
|
101 |
+
Initialises a tracker using initial bounding box.
|
102 |
+
"""
|
103 |
+
#define constant velocity model
|
104 |
+
self.kf = KalmanFilter(dim_x=7, dim_z=4)
|
105 |
+
self.kf.F = np.array([[1,0,0,0,1,0,0],[0,1,0,0,0,1,0],[0,0,1,0,0,0,1],[0,0,0,1,0,0,0], [0,0,0,0,1,0,0],[0,0,0,0,0,1,0],[0,0,0,0,0,0,1]])
|
106 |
+
self.kf.H = np.array([[1,0,0,0,0,0,0],[0,1,0,0,0,0,0],[0,0,1,0,0,0,0],[0,0,0,1,0,0,0]])
|
107 |
+
|
108 |
+
self.kf.R[2:,2:] *= 10.
|
109 |
+
self.kf.P[4:,4:] *= 1000. #give high uncertainty to the unobservable initial velocities
|
110 |
+
self.kf.P *= 10.
|
111 |
+
self.kf.Q[-1,-1] *= 0.01
|
112 |
+
self.kf.Q[4:,4:] *= 0.01
|
113 |
+
|
114 |
+
self.kf.x[:4] = convert_bbox_to_z(bbox)
|
115 |
+
self.time_since_update = 0
|
116 |
+
self.id = KalmanBoxTracker.count
|
117 |
+
KalmanBoxTracker.count += 1
|
118 |
+
self.history = []
|
119 |
+
self.hits = 0
|
120 |
+
self.hit_streak = 0
|
121 |
+
self.age = 0
|
122 |
+
|
123 |
+
def update(self,bbox):
|
124 |
+
"""
|
125 |
+
Updates the state vector with observed bbox.
|
126 |
+
"""
|
127 |
+
self.time_since_update = 0
|
128 |
+
self.history = []
|
129 |
+
self.hits += 1
|
130 |
+
self.hit_streak += 1
|
131 |
+
self.kf.update(convert_bbox_to_z(bbox))
|
132 |
+
|
133 |
+
def predict(self):
|
134 |
+
"""
|
135 |
+
Advances the state vector and returns the predicted bounding box estimate.
|
136 |
+
"""
|
137 |
+
if((self.kf.x[6]+self.kf.x[2])<=0):
|
138 |
+
self.kf.x[6] *= 0.0
|
139 |
+
self.kf.predict()
|
140 |
+
self.age += 1
|
141 |
+
if(self.time_since_update>0):
|
142 |
+
self.hit_streak = 0
|
143 |
+
self.time_since_update += 1
|
144 |
+
self.history.append(convert_x_to_bbox(self.kf.x))
|
145 |
+
return self.history[-1]
|
146 |
+
|
147 |
+
def get_state(self):
|
148 |
+
"""
|
149 |
+
Returns the current bounding box estimate.
|
150 |
+
"""
|
151 |
+
return convert_x_to_bbox(self.kf.x)
|
152 |
+
|
153 |
+
|
154 |
+
def associate_detections_to_trackers(detections,trackers,iou_threshold = 0.3):
|
155 |
+
"""
|
156 |
+
Assigns detections to tracked object (both represented as bounding boxes)
|
157 |
+
|
158 |
+
Returns 3 lists of matches, unmatched_detections and unmatched_trackers
|
159 |
+
"""
|
160 |
+
if(len(trackers)==0):
|
161 |
+
return np.empty((0,2),dtype=int), np.arange(len(detections)), np.empty((0,5),dtype=int)
|
162 |
+
|
163 |
+
iou_matrix = iou_batch(detections, trackers)
|
164 |
+
|
165 |
+
if min(iou_matrix.shape) > 0:
|
166 |
+
a = (iou_matrix > iou_threshold).astype(np.int32)
|
167 |
+
if a.sum(1).max() == 1 and a.sum(0).max() == 1:
|
168 |
+
matched_indices = np.stack(np.where(a), axis=1)
|
169 |
+
else:
|
170 |
+
matched_indices = linear_assignment(-iou_matrix)
|
171 |
+
else:
|
172 |
+
matched_indices = np.empty(shape=(0,2))
|
173 |
+
|
174 |
+
unmatched_detections = []
|
175 |
+
for d, det in enumerate(detections):
|
176 |
+
if(d not in matched_indices[:,0]):
|
177 |
+
unmatched_detections.append(d)
|
178 |
+
unmatched_trackers = []
|
179 |
+
for t, trk in enumerate(trackers):
|
180 |
+
if(t not in matched_indices[:,1]):
|
181 |
+
unmatched_trackers.append(t)
|
182 |
+
|
183 |
+
#filter out matched with low IOU
|
184 |
+
matches = []
|
185 |
+
for m in matched_indices:
|
186 |
+
if(iou_matrix[m[0], m[1]]<iou_threshold):
|
187 |
+
unmatched_detections.append(m[0])
|
188 |
+
unmatched_trackers.append(m[1])
|
189 |
+
else:
|
190 |
+
matches.append(m.reshape(1,2))
|
191 |
+
if(len(matches)==0):
|
192 |
+
matches = np.empty((0,2),dtype=int)
|
193 |
+
else:
|
194 |
+
matches = np.concatenate(matches,axis=0)
|
195 |
+
|
196 |
+
return matches, np.array(unmatched_detections), np.array(unmatched_trackers)
|
197 |
+
|
198 |
+
|
199 |
+
class Sort(object):
|
200 |
+
def __init__(self, max_age=1, min_hits=3, iou_threshold=0.3, max_interpolation=1):
|
201 |
+
"""
|
202 |
+
Sets key parameters for SORT
|
203 |
+
"""
|
204 |
+
self.max_age = max_age
|
205 |
+
self.min_hits = min_hits
|
206 |
+
self.iou_threshold = iou_threshold
|
207 |
+
self.trackers = []
|
208 |
+
self.frame_count = 0
|
209 |
+
self.max_interpolation = max_interpolation
|
210 |
+
|
211 |
+
def update(self, dets=np.empty((0, 5))):
|
212 |
+
"""
|
213 |
+
Params:
|
214 |
+
dets - a numpy array of detections in the format [[x1,y1,x2,y2,score],[x1,y1,x2,y2,score],...]
|
215 |
+
Requires: this method must be called once for each frame even with empty detections (use np.empty((0, 5)) for frames without detections).
|
216 |
+
Returns a similar array, where the last column is the object ID.
|
217 |
+
|
218 |
+
NOTE: The number of objects returned may differ from the number of detections provided.
|
219 |
+
"""
|
220 |
+
self.frame_count += 1
|
221 |
+
# get predicted locations from existing trackers.
|
222 |
+
trks = np.zeros((len(self.trackers), 5))
|
223 |
+
to_del = []
|
224 |
+
ret = []
|
225 |
+
for t, trk in enumerate(trks):
|
226 |
+
pos = self.trackers[t].predict()[0]
|
227 |
+
trk[:] = [pos[0], pos[1], pos[2], pos[3], 0]
|
228 |
+
if np.any(np.isnan(pos)):
|
229 |
+
to_del.append(t)
|
230 |
+
trks = np.ma.compress_rows(np.ma.masked_invalid(trks))
|
231 |
+
for t in reversed(to_del):
|
232 |
+
self.trackers.pop(t)
|
233 |
+
matched, unmatched_dets, unmatched_trks = associate_detections_to_trackers(dets,trks, self.iou_threshold)
|
234 |
+
|
235 |
+
# update matched trackers with assigned detections
|
236 |
+
for m in matched:
|
237 |
+
self.trackers[m[1]].update(dets[m[0], :])
|
238 |
+
|
239 |
+
# create and initialise new trackers for unmatched detections
|
240 |
+
for i in unmatched_dets:
|
241 |
+
trk = KalmanBoxTracker(dets[i,:])
|
242 |
+
self.trackers.append(trk)
|
243 |
+
i = len(self.trackers)
|
244 |
+
for trk in reversed(self.trackers):
|
245 |
+
d = trk.get_state()[0]
|
246 |
+
# updated (trk.time_since_update < 1) to (trk.time_since_update < self.max_age) --->Medha
|
247 |
+
if (trk.time_since_update < self.max_age) and (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits):
|
248 |
+
ret.append(np.concatenate((d,[trk.id+1])).reshape(1,-1)) # +1 as MOT benchmark requires positive
|
249 |
+
i -= 1
|
250 |
+
# remove dead tracklet
|
251 |
+
if(trk.time_since_update > self.max_age):
|
252 |
+
self.trackers.pop(i)
|
253 |
+
if(len(ret)>0):
|
254 |
+
return np.concatenate(ret)
|
255 |
+
return np.empty((0,5))
|
256 |
+
|
257 |
+
def parse_args():
|
258 |
+
"""Parse input arguments."""
|
259 |
+
parser = argparse.ArgumentParser(description='SORT demo')
|
260 |
+
parser.add_argument('--display', dest='display', help='Display online tracker output (slow) [False]',action='store_true')
|
261 |
+
parser.add_argument("--seq_path", help="Path to detections.", type=str, default='data')
|
262 |
+
parser.add_argument("--phase", help="Subdirectory in seq_path.", type=str, default='train')
|
263 |
+
parser.add_argument("--max_age",
|
264 |
+
help="Maximum number of frames to keep alive a track without associated detections.",
|
265 |
+
type=int, default=1)
|
266 |
+
parser.add_argument("--min_hits",
|
267 |
+
help="Minimum number of associated detections before track is initialised.",
|
268 |
+
type=int, default=3)
|
269 |
+
parser.add_argument("--iou_threshold", help="Minimum IOU for match.", type=float, default=0.3)
|
270 |
+
args = parser.parse_args()
|
271 |
+
return args
|
272 |
+
|
273 |
+
if __name__ == '__main__':
|
274 |
+
# all train
|
275 |
+
args = parse_args()
|
276 |
+
display = args.display
|
277 |
+
phase = args.phase
|
278 |
+
total_time = 0.0
|
279 |
+
total_frames = 0
|
280 |
+
colours = np.random.rand(32, 3) #used only for display
|
281 |
+
if(display):
|
282 |
+
if not os.path.exists('mot_benchmark'):
|
283 |
+
print('\n\tERROR: mot_benchmark link not found!\n\n Create a symbolic link to the MOT benchmark\n (https://motchallenge.net/data/2D_MOT_2015/#download). E.g.:\n\n $ ln -s /path/to/MOT2015_challenge/2DMOT2015 mot_benchmark\n\n')
|
284 |
+
exit()
|
285 |
+
plt.ion()
|
286 |
+
fig = plt.figure()
|
287 |
+
ax1 = fig.add_subplot(111, aspect='equal')
|
288 |
+
|
289 |
+
if not os.path.exists('output'):
|
290 |
+
os.makedirs('output')
|
291 |
+
pattern = os.path.join(args.seq_path, phase, '*', 'det', 'det.txt')
|
292 |
+
for seq_dets_fn in glob.glob(pattern):
|
293 |
+
mot_tracker = Sort(max_age=args.max_age,
|
294 |
+
min_hits=args.min_hits,
|
295 |
+
iou_threshold=args.iou_threshold) #create instance of the SORT tracker
|
296 |
+
seq_dets = np.loadtxt(seq_dets_fn, delimiter=',')
|
297 |
+
seq = seq_dets_fn[pattern.find('*'):].split(os.path.sep)[0]
|
298 |
+
|
299 |
+
with open(os.path.join('output', '%s.txt'%(seq)),'w') as out_file:
|
300 |
+
print("Processing %s."%(seq))
|
301 |
+
for frame in range(int(seq_dets[:,0].max())):
|
302 |
+
frame += 1 #detection and frame numbers begin at 1
|
303 |
+
dets = seq_dets[seq_dets[:, 0]==frame, 2:7]
|
304 |
+
dets[:, 2:4] += dets[:, 0:2] #convert [x1,y1,w,h] to [x1,y1,x2,y2]
|
305 |
+
total_frames += 1
|
306 |
+
|
307 |
+
if(display):
|
308 |
+
fn = os.path.join('mot_benchmark', phase, seq, 'img1', '%06d.jpg'%(frame))
|
309 |
+
im =io.imread(fn)
|
310 |
+
ax1.imshow(im)
|
311 |
+
plt.title(seq + ' Tracked Targets')
|
312 |
+
|
313 |
+
start_time = time.time()
|
314 |
+
trackers = mot_tracker.update(dets)
|
315 |
+
cycle_time = time.time() - start_time
|
316 |
+
total_time += cycle_time
|
317 |
+
|
318 |
+
for d in trackers:
|
319 |
+
print('%d,%d,%.2f,%.2f,%.2f,%.2f,1,-1,-1,-1'%(frame,d[4],d[0],d[1],d[2]-d[0],d[3]-d[1]),file=out_file)
|
320 |
+
if(display):
|
321 |
+
d = d.astype(np.int32)
|
322 |
+
ax1.add_patch(patches.Rectangle((d[0],d[1]),d[2]-d[0],d[3]-d[1],fill=False,lw=3,ec=colours[d[4]%32,:]))
|
323 |
+
|
324 |
+
if(display):
|
325 |
+
fig.canvas.flush_events()
|
326 |
+
plt.draw()
|
327 |
+
ax1.cla()
|
328 |
+
|
329 |
+
print("Total Tracking took: %.3f seconds for %d frames or %.1f FPS" % (total_time, total_frames, total_frames / total_time))
|
330 |
+
|
331 |
+
if(display):
|
332 |
+
print("Note: to get real runtime results run without the option: --display")
|
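Note: a minimal usage sketch for the Sort tracker defined above (not part of the commit; the detections and parameter values below are hypothetical). It follows the contract documented in Sort.update(): call it exactly once per frame with an N x 5 array of [x1, y1, x2, y2, score] detections, pass np.empty((0, 5)) on frames without detections, and read back rows of [x1, y1, x2, y2, track_id].

import numpy as np
from sort import Sort  # assumes MEMTrack/src is on sys.path

tracker = Sort(max_age=35, min_hits=3, iou_threshold=0.3)

# Hypothetical detections for one frame, one row per box: [x1, y1, x2, y2, score].
dets = np.array([[10., 10., 41., 41., 0.9],
                 [100., 120., 131., 151., 0.8]])
tracks = tracker.update(dets)          # rows: [x1, y1, x2, y2, track_id]
tracker.update(np.empty((0, 5)))       # a frame with no detections still needs a call
print(tracks)
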
MEMTrack/src/trainBacteriaRetinanetMotionData_Motility.py
ADDED
@@ -0,0 +1,318 @@
1 |
+
# Some basic setup:
|
2 |
+
# Setup detectron2 logger
|
3 |
+
import detectron2
|
4 |
+
from detectron2.utils.logger import setup_logger
|
5 |
+
setup_logger()
|
6 |
+
|
7 |
+
# import some common libraries
|
8 |
+
import numpy as np
|
9 |
+
import os, json, cv2, random
|
10 |
+
import cv2
|
11 |
+
import pandas as pd
|
12 |
+
import shutil
|
13 |
+
import numpy as np
|
14 |
+
|
15 |
+
# import some common detectron2 utilities
|
16 |
+
from detectron2 import model_zoo
|
17 |
+
from detectron2.engine import DefaultPredictor
|
18 |
+
from detectron2.config import get_cfg
|
19 |
+
from detectron2.utils.visualizer import Visualizer
|
20 |
+
from detectron2.data import MetadataCatalog, DatasetCatalog
|
21 |
+
import json
|
22 |
+
from detectron2.structures import BoxMode
|
23 |
+
from detectron2.data import DatasetCatalog, MetadataCatalog
|
24 |
+
import argparse
|
25 |
+
from natsort import natsorted
|
26 |
+
import PIL
|
27 |
+
from PIL import Image
|
28 |
+
import matplotlib.pyplot as plt
|
29 |
+
from detectron2.engine import DefaultTrainer
|
30 |
+
from detectron2.evaluation import COCOEvaluator
|
31 |
+
from detectron2.config import get_cfg
|
32 |
+
from contextlib import suppress
|
33 |
+
import os
|
34 |
+
|
35 |
+
ap = argparse.ArgumentParser(description='Training')
|
36 |
+
ap.add_argument('--source_path', default="./CombinationModel/data_feature_optical_flow_median_back_2pyr_18win_background_img/", type=str, metavar='PATH')
|
37 |
+
|
38 |
+
ap.add_argument('--output_dir', default="CombinationModel/easy-optical_flow_median_back_2pyr_18win_00125_34k", type=str, metavar='CELL PATH')
|
39 |
+
|
40 |
+
ap.add_argument('--annotations_train', default="Easy", type=str, metavar='TRAIN')
|
41 |
+
ap.add_argument('--annotations_test', default="Easy", type=str, metavar='TEST')
|
42 |
+
ap.add_argument('--bbox_size', default=31, type=int)
|
43 |
+
ap.add_argument('--epochs', default="90", type=str, metavar='TEST')
|
44 |
+
ap.add_argument('--lr', default="0.00125", type=str, metavar='TEST')
|
45 |
+
ap.add_argument('--feature', default="images_feature/", type=str, metavar='FEATURE')
|
46 |
+
ap.add_argument("--cuda", default=0, type=int)
|
47 |
+
|
48 |
+
args = ap.parse_args()
|
49 |
+
|
50 |
+
width = args.bbox_size
|
51 |
+
coco_format_train = pd.DataFrame(columns=["file_name","height","width","annotations"])
|
52 |
+
coco_format_train["annotations"] = coco_format_train["annotations"].astype('object')
|
53 |
+
|
54 |
+
#source = "./CombinationModel/data_feature_optical_flow_median_back_2pyr_18win_background_img/" #update
|
55 |
+
source = args.source_path
|
56 |
+
dest_train = source + "train/"
|
57 |
+
dest_test = source + "val/" #update
|
58 |
+
|
59 |
+
with suppress(OSError):
|
60 |
+
os.remove(source + 'test.json')
|
61 |
+
|
62 |
+
|
63 |
+
val = True #update
|
64 |
+
#output_dir = ("CombinationModel/easy-optical_flow_median_back_2pyr_18win_00125_34k") #update
|
65 |
+
output_dir = args.output_dir
|
66 |
+
train_images_path = "/train/" + args.feature #"/images_feature/"
|
67 |
+
val_image_path = "/val/" + args.feature #"/images_feature/"
|
68 |
+
|
69 |
+
#annotations_source = source + "annotation_easy_hard/"
|
70 |
+
images_source = source + args.feature #"images_feature/"
|
71 |
+
|
72 |
+
if args.annotations_train == "Easy":
|
73 |
+
annotations_train = dest_train + "annotation_easy/"
|
74 |
+
elif args.annotations_train == "Hard":
|
75 |
+
annotations_train = dest_train + "annotation_hard/"
|
76 |
+
elif args.annotations_train == "VeryHard":
|
77 |
+
annotations_train = dest_train + "annotation_veryhard/"
|
78 |
+
elif args.annotations_train == "Easy+Hard":
|
79 |
+
annotations_train = dest_train + "annotation_easy_hard/"
|
80 |
+
elif args.annotations_train == "All":
|
81 |
+
annotations_train = dest_train + "annotation_easy_hard_veryhard/"
|
82 |
+
elif args.annotations_train == "Motility-low":
|
83 |
+
annotations_train = dest_train + "annotation_motility_low/"
|
84 |
+
elif args.annotations_train == "Motility-high":
|
85 |
+
annotations_train = dest_train + "annotation_motility_high/"
|
86 |
+
elif args.annotations_train == "Motility-wiggle":
|
87 |
+
annotations_train = dest_train + "annotation_motility_wiggle/"
|
88 |
+
elif args.annotations_train == "Motility-mid":
|
89 |
+
annotations_train = dest_train + "annotation_motility_mid/"
|
90 |
+
elif args.annotations_train == "Sticking-stick":
|
91 |
+
annotations_train = dest_train + "annotation_sticking_stick/"
|
92 |
+
elif args.annotations_train == "Sticking-motile":
|
93 |
+
annotations_train = dest_train + "annotation_sticking_motile/"
|
94 |
+
elif args.annotations_train == "Sticking-non_motile":
|
95 |
+
annotations_train = dest_train + "annotation_sticking_non_motile/"
|
96 |
+
elif args.annotations_train == "Motility-low-wiggle":
|
97 |
+
annotations_train = dest_train + "annotation_motility_low_wiggle/"
|
98 |
+
elif args.annotations_train == "Motility-mid-high":
|
99 |
+
annotations_train = dest_train + "annotation_motility_mid_high/"
|
100 |
+
|
101 |
+
|
102 |
+
if args.annotations_test == "Easy":
|
103 |
+
annotations_test = dest_test + "annotation_easy/"
|
104 |
+
elif args.annotations_test == "Hard":
|
105 |
+
annotations_test = dest_test + "annotation_hard/"
|
106 |
+
elif args.annotations_test == "VeryHard":
|
107 |
+
annotations_test = dest_test + "annotation_veryhard/"
|
108 |
+
elif args.annotations_test == "Easy+Hard":
|
109 |
+
annotations_test = dest_test + "annotation_easy_hard/"
|
110 |
+
elif args.annotations_test == "All":
|
111 |
+
annotations_test = dest_test + "annotation_easy_hard_veryhard/"
|
112 |
+
elif args.annotations_test == "Motility-low":
|
113 |
+
annotations_test = dest_test + "annotation_motility_low/"
|
114 |
+
elif args.annotations_test == "Motility-high":
|
115 |
+
annotations_test = dest_test + "annotation_motility_high/"
|
116 |
+
elif args.annotations_test == "Motility-wiggle":
|
117 |
+
annotations_test = dest_test + "annotation_motility_wiggle/"
|
118 |
+
elif args.annotations_test == "Motility-mid":
|
119 |
+
annotations_test = dest_test + "annotation_motility_mid/"
|
120 |
+
elif args.annotations_test == "Sticking-stick":
|
121 |
+
annotations_test = dest_test + "annotation_sticking_stick/"
|
122 |
+
elif args.annotations_test == "Sticking-motile":
|
123 |
+
annotations_test = dest_test + "annotation_sticking_motile/"
|
124 |
+
elif args.annotations_test == "Sticking-non_motile":
|
125 |
+
annotations_test = dest_test + "annotation_sticking_non_motile/"
|
126 |
+
elif args.annotations_test == "Motility-low-wiggle":
|
127 |
+
annotations_test = dest_test + "annotation_motility_low_wiggle/"
|
128 |
+
elif args.annotations_test == "Motility-mid-high":
|
129 |
+
annotations_test = dest_test + "annotation_motility_mid_high/"
|
130 |
+
|
131 |
+
|
132 |
+
images_train = dest_train + args.feature #"images_feature/"
|
133 |
+
images_test = dest_test + args.feature #"images_feature/"
|
134 |
+
|
135 |
+
factor_w = 1#1024/1388
|
136 |
+
factor_h = 1#1024/1040
|
137 |
+
|
138 |
+
#function to get background frame
|
139 |
+
#function to get prev frame
|
140 |
+
#function to create new image given image num
|
141 |
+
|
142 |
+
for txt_file in natsorted(os.listdir(annotations_train)):
|
143 |
+
#width = 31
|
144 |
+
text_file = open(annotations_train + txt_file, 'r')
|
145 |
+
xy_coords = text_file.readlines()
|
146 |
+
boxes = []
|
147 |
+
res=pd.DataFrame(columns=["file_name","height","width","annotations"])
|
148 |
+
image = PIL.Image.open(images_train + txt_file[:-4] + ".tif").convert('L')
|
149 |
+
image_feature = PIL.Image.open(images_train + txt_file[:-4] + ".tif")
|
150 |
+
image = image_feature
|
151 |
+
#print(image.size)
|
152 |
+
res.at[0,"height"] = image.height
|
153 |
+
res.at[0,"width"] = image.width
|
154 |
+
res.at[0,"file_name"] = txt_file[:-4]+".tif"
|
155 |
+
bbox_mode = 0
|
156 |
+
category_id = 0
|
157 |
+
# image2 = image.resize((1024,1024))
|
158 |
+
# image2.save(images_resized_train + txt_file[:-4] + ".jpg")
|
159 |
+
for xy in xy_coords:
|
160 |
+
box = []
|
161 |
+
x = float(xy.split(" ")[0])
|
162 |
+
y = float(xy.split(" ")[1])
|
163 |
+
x1 = int(x*factor_w - (width // 2))
|
164 |
+
y1 = int(y*factor_h - (width // 2))
|
165 |
+
x2 = int(x*factor_w + (width // 2))
|
166 |
+
y2 = int(y*factor_h + (width // 2))
|
167 |
+
w = h = 31
|
168 |
+
box = [x1, y1, x2, y2]
|
169 |
+
boxes.append(np.array(box))
|
170 |
+
#print(np.array(box))
|
171 |
+
|
172 |
+
res["annotations"]=res["annotations"].astype('object')
|
173 |
+
annotation_df = pd.DataFrame(columns=["bbox","bbox_mode","category_id"])
|
174 |
+
annotation_df["bbox"] = boxes
|
175 |
+
annotation_df["bbox_mode"] = bbox_mode
|
176 |
+
annotation_df["category_id"] = category_id
|
177 |
+
annotations = annotation_df.T.to_dict().values()
|
178 |
+
l = []
|
179 |
+
for j in annotations:
|
180 |
+
l.append(j)
|
181 |
+
res.at[0,"annotations"] = l
|
182 |
+
coco_format_train = coco_format_train.append(res)
|
183 |
+
coco_format_train.reset_index(drop=True,inplace=True)
|
184 |
+
|
185 |
+
coco_format_train.reset_index(inplace=True)
|
186 |
+
coco_format_train.rename(columns={"index":"image_id"},inplace=True)
|
187 |
+
coco_format_train.to_json(source + "train.json",orient="records")
|
188 |
+
|
189 |
+
coco_format_test = pd.DataFrame(columns=["file_name","height","width","annotations"])
|
190 |
+
coco_format_test["annotations"] = coco_format_test["annotations"].astype('object')
|
191 |
+
|
192 |
+
for txt_file in natsorted(os.listdir(annotations_test)):
|
193 |
+
#width = 31
|
194 |
+
text_file = open(annotations_test + txt_file, 'r')
|
195 |
+
xy_coords = text_file.readlines()
|
196 |
+
boxes = []
|
197 |
+
res=pd.DataFrame(columns=["file_name","height","width","annotations"])
|
198 |
+
image = PIL.Image.open(images_test + txt_file[:-4] + ".tif").convert('L')
|
199 |
+
image_feature = PIL.Image.open(images_test + txt_file[:-4] + ".tif")
|
200 |
+
image = image_feature
|
201 |
+
#print(image.size)
|
202 |
+
res.at[0,"height"] = image.height
|
203 |
+
res.at[0,"width"] = image.width
|
204 |
+
res.at[0,"file_name"] = txt_file[:-4]+".tif"
|
205 |
+
bbox_mode = 0
|
206 |
+
category_id = 0
|
207 |
+
# image2 = image.resize((1024,1024))
|
208 |
+
# image2.save(images_resized_train + txt_file[:-4] + ".jpg")
|
209 |
+
for xy in xy_coords:
|
210 |
+
box = []
|
211 |
+
x = float(xy.split(" ")[0])
|
212 |
+
y = float(xy.split(" ")[1])
|
213 |
+
x1 = int(x*factor_w - (width // 2))
|
214 |
+
y1 = int(y*factor_h - (width // 2))
|
215 |
+
x2 = int(x*factor_w + (width // 2))
|
216 |
+
y2 = int(y*factor_h + (width // 2))
|
217 |
+
w = h = 31
|
218 |
+
box = [x1, y1, x2, y2]
|
219 |
+
boxes.append(np.array(box))
|
220 |
+
#print(np.array(box))
|
221 |
+
|
222 |
+
res["annotations"]=res["annotations"].astype('object')
|
223 |
+
annotation_df = pd.DataFrame(columns=["bbox","bbox_mode","category_id"])
|
224 |
+
annotation_df["bbox"] = boxes
|
225 |
+
annotation_df["bbox_mode"] = bbox_mode
|
226 |
+
annotation_df["category_id"] = category_id
|
227 |
+
annotations = annotation_df.T.to_dict().values()
|
228 |
+
l = []
|
229 |
+
for j in annotations:
|
230 |
+
l.append(j)
|
231 |
+
res.at[0,"annotations"] = l
|
232 |
+
coco_format_test = coco_format_test.append(res)
|
233 |
+
coco_format_test.reset_index(drop=True,inplace=True)
|
234 |
+
|
235 |
+
coco_format_test.reset_index(inplace=True)
|
236 |
+
coco_format_test.rename(columns={"index":"image_id"},inplace=True)
|
237 |
+
coco_format_test.to_json(source + "val.json",orient="records")
|
238 |
+
|
239 |
+
|
240 |
+
|
241 |
+
def get_board_dicts(imgdir, mode):
|
242 |
+
if mode == 'train':
|
243 |
+
json_file = imgdir+"/train.json" #Fetch the json file
|
244 |
+
if mode == 'val':
|
245 |
+
json_file = imgdir+"/val.json" #Fetch the json file
|
246 |
+
with open(json_file) as f:
|
247 |
+
dataset_dicts = json.load(f)
|
248 |
+
for i in dataset_dicts:
|
249 |
+
filename = i["file_name"]
|
250 |
+
if mode == 'train':
|
251 |
+
i["file_name"] = imgdir + train_images_path + filename
|
252 |
+
if mode == 'val':
|
253 |
+
i["file_name"] = imgdir + val_image_path + filename
|
254 |
+
for j in i["annotations"]:
|
255 |
+
j["bbox_mode"] = BoxMode.XYXY_ABS #Setting the required Box Mode
|
256 |
+
j["category_id"] = int(j["category_id"])
|
257 |
+
return dataset_dicts
|
258 |
+
|
259 |
+
#Registering the Dataset
|
260 |
+
for d in ["train", "val"]:
|
261 |
+
DatasetCatalog.register("boardetect_" + d, lambda d=d: get_board_dicts(source, d))
|
262 |
+
MetadataCatalog.get("boardetect_" + d).set(thing_classes=["node"])
|
263 |
+
board_metadata = MetadataCatalog.get("boardetect_train")
|
264 |
+
val_metadata = MetadataCatalog.get("boardetect_val")
|
265 |
+
|
266 |
+
train_data = ("boardetect_train",)
|
267 |
+
|
268 |
+
if val:
|
269 |
+
val_data = ("boardetect_val",)
|
270 |
+
else:
|
271 |
+
val_data = ("boardetect_train",)
|
272 |
+
|
273 |
+
|
274 |
+
|
275 |
+
class CocoTrainer(DefaultTrainer):
|
276 |
+
|
277 |
+
@classmethod
|
278 |
+
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
|
279 |
+
if output_folder is None:
|
280 |
+
os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
|
281 |
+
output_folder = cfg.OUTPUT_DIR
|
282 |
+
|
283 |
+
|
284 |
+
return COCOEvaluator(dataset_name, cfg, False, output_folder)
|
285 |
+
|
286 |
+
cfg = get_cfg()
|
287 |
+
cfg.MODEL.DEVICE = f"cuda:{args.cuda}"
|
288 |
+
cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/retinanet_R_101_FPN_3x.yaml")) #Get the basic model configuration from the model zoo
|
289 |
+
#Passing the Train and Validation sets
|
290 |
+
cfg.DATASETS.TRAIN = train_data #("boardetect_train",)
|
291 |
+
cfg.DATASETS.TEST = val_data #("boardetect_train",)
|
292 |
+
cfg.OUTPUT_DIR = output_dir #("comparison-optical-flow")
|
293 |
+
# Number of data loading threads
|
294 |
+
cfg.DATALOADER.NUM_WORKERS = 4
|
295 |
+
#cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml") # Let training initialize from model zoo
|
296 |
+
#cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth") #uncommwnt during inference
|
297 |
+
#cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth") #uncommwnt during inference
|
298 |
+
#cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_00044999.pth") #uncommwnt during inference
|
299 |
+
# Number of images per batch across all machines.
|
300 |
+
cfg.SOLVER.IMS_PER_BATCH = 4
|
301 |
+
cfg.SOLVER.BASE_LR = float(args.lr) #0.00125 # pick a good LearningRate
|
302 |
+
cfg.SOLVER.MAX_ITER = int(args.epochs) #90000 #No. of iterations
|
303 |
+
# cfg.SOLVER.STEPS = (300,600)
|
304 |
+
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 80
|
305 |
+
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
|
306 |
+
cfg.TEST.EVAL_PERIOD = 500 # No. of iterations after which the Validation Set is evaluated.
|
307 |
+
cfg.TEST.DETECTIONS_PER_IMAGE = 60
|
308 |
+
cfg.SOLVER.CHECKPOINT_PERIOD = 5000
|
309 |
+
cfg.VIS_PERIOD = 500
|
310 |
+
cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN = 100
|
311 |
+
cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST = 100
|
312 |
+
cfg.MODEL.RPN.POST_NMS_TOPK_TRAIN = 80 #
|
313 |
+
cfg.MODEL.RPN.POST_NMS_TOPK_TEST = 80
|
314 |
+
os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
|
315 |
+
trainer = CocoTrainer(cfg)
|
316 |
+
trainer.resume_or_load(resume=True) #resume=True for inference
|
317 |
+
trainer.train()
|
318 |
+
print(cfg.MODEL.WEIGHTS)
|
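Note: a worked example of the annotation conversion performed above (illustrative values, not from the repository). With the default --bbox_size of 31, a ground-truth centroid line "100.0 200.0" gives x1 = int(100 - 15) = 85, y1 = int(200 - 15) = 185, x2 = int(100 + 15) = 115, y2 = int(200 + 15) = 215, so the record written to train.json looks roughly like the sketch below.

# Hypothetical shape of one train.json record; when get_board_dicts() loads it,
# bbox_mode 0 is rewritten to BoxMode.XYXY_ABS and category_id is cast to int.
record = {
    "image_id": 0,
    "file_name": "0.tif",        # hypothetical frame name
    "height": 1040,              # taken from the source image in practice
    "width": 1388,
    "annotations": [{"bbox": [85, 185, 115, 215], "bbox_mode": 0, "category_id": 0}],
}
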
MEMTrack/src/trainBacteriaRetinanetMotionData_Motility_Val_loss.py
ADDED
@@ -0,0 +1,333 @@
1 |
+
# Some basic setup:
|
2 |
+
# Setup detectron2 logger
|
3 |
+
import detectron2
|
4 |
+
from detectron2.utils.logger import setup_logger
|
5 |
+
setup_logger()
|
6 |
+
|
7 |
+
# import some common libraries
|
8 |
+
import numpy as np
|
9 |
+
import os, json, cv2, random
|
10 |
+
import cv2
|
11 |
+
import pandas as pd
|
12 |
+
import shutil
|
13 |
+
import numpy as np
|
14 |
+
|
15 |
+
# import some common detectron2 utilities
|
16 |
+
from detectron2 import model_zoo
|
17 |
+
from detectron2.engine import DefaultPredictor
|
18 |
+
from detectron2.config import get_cfg
|
19 |
+
from detectron2.utils.visualizer import Visualizer
|
20 |
+
from detectron2.data import MetadataCatalog, DatasetCatalog
|
21 |
+
import json
|
22 |
+
from detectron2.structures import BoxMode
|
23 |
+
from detectron2.data import DatasetCatalog, MetadataCatalog
|
24 |
+
import argparse
|
25 |
+
from natsort import natsorted
|
26 |
+
import PIL
|
27 |
+
from PIL import Image
|
28 |
+
import matplotlib.pyplot as plt
|
29 |
+
from detectron2.engine import DefaultTrainer
|
30 |
+
from detectron2.evaluation import COCOEvaluator
|
31 |
+
from detectron2.config import get_cfg
|
32 |
+
from contextlib import suppress
|
33 |
+
import os
|
34 |
+
from LossEvalHook import LossEvalHook
|
35 |
+
from detectron2.data import DatasetMapper, build_detection_test_loader
|
36 |
+
|
37 |
+
ap = argparse.ArgumentParser(description='Training')
|
38 |
+
ap.add_argument('--source_path', default="./CombinationModel/data_feature_optical_flow_median_back_2pyr_18win_background_img/", type=str, metavar='PATH')
|
39 |
+
|
40 |
+
ap.add_argument('--output_dir', default="CombinationModel/easy-optical_flow_median_back_2pyr_18win_00125_34k", type=str, metavar='CELL PATH')
|
41 |
+
|
42 |
+
ap.add_argument('--annotations_train', default="Easy", type=str, metavar='TRAIN')
|
43 |
+
ap.add_argument('--annotations_test', default="Easy", type=str, metavar='TEST')
|
44 |
+
ap.add_argument('--bbox_size', default=31, type=int)
|
45 |
+
ap.add_argument('--epochs', default="90", type=str, metavar='TEST')
|
46 |
+
ap.add_argument('--lr', default="0.00125", type=str, metavar='TEST')
|
47 |
+
ap.add_argument('--feature', default="images_feature/", type=str, metavar='FEATURE')
|
48 |
+
ap.add_argument("--cuda", default=0, type=int)
|
49 |
+
ap.add_argument("--exp", type=str)
|
50 |
+
|
51 |
+
args = ap.parse_args()
|
52 |
+
|
53 |
+
width = args.bbox_size
|
54 |
+
coco_format_train = pd.DataFrame(columns=["file_name","height","width","annotations"])
|
55 |
+
coco_format_train["annotations"] = coco_format_train["annotations"].astype('object')
|
56 |
+
|
57 |
+
#source = "./CombinationModel/data_feature_optical_flow_median_back_2pyr_18win_background_img/" #update
|
58 |
+
source = args.source_path
|
59 |
+
dest_train = source + "train/"
|
60 |
+
dest_test = source + "val/" #update
|
61 |
+
|
62 |
+
with suppress(OSError):
|
63 |
+
os.remove(source + 'test.json')
|
64 |
+
|
65 |
+
|
66 |
+
val = True #update
|
67 |
+
#output_dir = ("CombinationModel/easy-optical_flow_median_back_2pyr_18win_00125_34k") #update
|
68 |
+
output_dir = args.output_dir
|
69 |
+
train_images_path = "/train/" + args.feature #"/images_feature/"
|
70 |
+
val_image_path = "/val/" + args.feature #"/images_feature/"
|
71 |
+
|
72 |
+
#annotations_source = source + "annotation_easy_hard/"
|
73 |
+
images_source = source + args.feature #"images_feature/"
|
74 |
+
|
75 |
+
if args.annotations_train == "Easy":
|
76 |
+
annotations_train = dest_train + "annotation_easy/"
|
77 |
+
elif args.annotations_train == "Hard":
|
78 |
+
annotations_train = dest_train + "annotation_hard/"
|
79 |
+
elif args.annotations_train == "VeryHard":
|
80 |
+
annotations_train = dest_train + "annotation_veryhard/"
|
81 |
+
elif args.annotations_train == "Easy+Hard":
|
82 |
+
annotations_train = dest_train + "annotation_easy_hard/"
|
83 |
+
elif args.annotations_train == "All":
|
84 |
+
annotations_train = dest_train + "annotation_easy_hard_veryhard/"
|
85 |
+
elif args.annotations_train == "Motility-low":
|
86 |
+
annotations_train = dest_train + "annotation_motility_low/"
|
87 |
+
elif args.annotations_train == "Motility-high":
|
88 |
+
annotations_train = dest_train + "annotation_motility_high/"
|
89 |
+
elif args.annotations_train == "Motility-wiggle":
|
90 |
+
annotations_train = dest_train + "annotation_motility_wiggle/"
|
91 |
+
elif args.annotations_train == "Motility-mid":
|
92 |
+
annotations_train = dest_train + "annotation_motility_mid/"
|
93 |
+
elif args.annotations_train == "Sticking-stick":
|
94 |
+
annotations_train = dest_train + "annotation_sticking_stick/"
|
95 |
+
elif args.annotations_train == "Sticking-motile":
|
96 |
+
annotations_train = dest_train + "annotation_sticking_motile/"
|
97 |
+
elif args.annotations_train == "Sticking-non_motile":
|
98 |
+
annotations_train = dest_train + "annotation_sticking_non_motile/"
|
99 |
+
elif args.annotations_train == "Motility-low-wiggle":
|
100 |
+
annotations_train = dest_train + "annotation_motility_low_wiggle/"
|
101 |
+
elif args.annotations_train == "Motility-mid-high":
|
102 |
+
annotations_train = dest_train + "annotation_motility_mid_high/"
|
103 |
+
|
104 |
+
|
105 |
+
if args.annotations_test == "Easy":
|
106 |
+
annotations_test = dest_test + "annotation_easy/"
|
107 |
+
elif args.annotations_test == "Hard":
|
108 |
+
annotations_test = dest_test + "annotation_hard/"
|
109 |
+
elif args.annotations_test == "VeryHard":
|
110 |
+
annotations_test = dest_test + "annotation_veryhard/"
|
111 |
+
elif args.annotations_test == "Easy+Hard":
|
112 |
+
annotations_test = dest_test + "annotation_easy_hard/"
|
113 |
+
elif args.annotations_test == "All":
|
114 |
+
annotations_test = dest_test + "annotation_easy_hard_veryhard/"
|
115 |
+
elif args.annotations_test == "Motility-low":
|
116 |
+
annotations_test = dest_test + "annotation_motility_low/"
|
117 |
+
elif args.annotations_test == "Motility-high":
|
118 |
+
annotations_test = dest_test + "annotation_motility_high/"
|
119 |
+
elif args.annotations_test == "Motility-wiggle":
|
120 |
+
annotations_test = dest_test + "annotation_motility_wiggle/"
|
121 |
+
elif args.annotations_test == "Motility-mid":
|
122 |
+
annotations_test = dest_test + "annotation_motility_mid/"
|
123 |
+
elif args.annotations_test == "Sticking-stick":
|
124 |
+
annotations_test = dest_test + "annotation_sticking_stick/"
|
125 |
+
elif args.annotations_test == "Sticking-motile":
|
126 |
+
annotations_test = dest_test + "annotation_sticking_motile/"
|
127 |
+
elif args.annotations_test == "Sticking-non_motile":
|
128 |
+
annotations_test = dest_test + "annotation_sticking_non_motile/"
|
129 |
+
elif args.annotations_test == "Motility-low-wiggle":
|
130 |
+
annotations_test = dest_test + "annotation_motility_low_wiggle/"
|
131 |
+
elif args.annotations_test == "Motility-mid-high":
|
132 |
+
annotations_test = dest_test + "annotation_motility_mid_high/"
|
133 |
+
|
134 |
+
|
135 |
+
images_train = dest_train + args.feature #"images_feature/"
|
136 |
+
images_test = dest_test + args.feature #"images_feature/"
|
137 |
+
|
138 |
+
factor_w = 1#1024/1388
|
139 |
+
factor_h = 1#1024/1040
|
140 |
+
|
141 |
+
#function to get background frame
|
142 |
+
#function to get prev frame
|
143 |
+
#function to create new image given image num
|
144 |
+
|
145 |
+
for txt_file in natsorted(os.listdir(annotations_train)):
|
146 |
+
#width = 31
|
147 |
+
text_file = open(annotations_train + txt_file, 'r')
|
148 |
+
xy_coords = text_file.readlines()
|
149 |
+
boxes = []
|
150 |
+
res=pd.DataFrame(columns=["file_name","height","width","annotations"])
|
151 |
+
image = PIL.Image.open(images_train + txt_file[:-4] + ".tif").convert('L')
|
152 |
+
image_feature = PIL.Image.open(images_train + txt_file[:-4] + ".tif")
|
153 |
+
image = image_feature
|
154 |
+
#print(image.size)
|
155 |
+
res.at[0,"height"] = image.height
|
156 |
+
res.at[0,"width"] = image.width
|
157 |
+
res.at[0,"file_name"] = txt_file[:-4]+".tif"
|
158 |
+
bbox_mode = 0
|
159 |
+
category_id = 0
|
160 |
+
# image2 = image.resize((1024,1024))
|
161 |
+
# image2.save(images_resized_train + txt_file[:-4] + ".jpg")
|
162 |
+
for xy in xy_coords:
|
163 |
+
box = []
|
164 |
+
x = float(xy.split(" ")[0])
|
165 |
+
y = float(xy.split(" ")[1])
|
166 |
+
x1 = int(x*factor_w - (width // 2))
|
167 |
+
y1 = int(y*factor_h - (width // 2))
|
168 |
+
x2 = int(x*factor_w + (width // 2))
|
169 |
+
y2 = int(y*factor_h + (width // 2))
|
170 |
+
w = h = 31
|
171 |
+
box = [x1, y1, x2, y2]
|
172 |
+
boxes.append(np.array(box))
|
173 |
+
#print(np.array(box))
|
174 |
+
|
175 |
+
res["annotations"]=res["annotations"].astype('object')
|
176 |
+
annotation_df = pd.DataFrame(columns=["bbox","bbox_mode","category_id"])
|
177 |
+
annotation_df["bbox"] = boxes
|
178 |
+
annotation_df["bbox_mode"] = bbox_mode
|
179 |
+
annotation_df["category_id"] = category_id
|
180 |
+
annotations = annotation_df.T.to_dict().values()
|
181 |
+
l = []
|
182 |
+
for j in annotations:
|
183 |
+
l.append(j)
|
184 |
+
res.at[0,"annotations"] = l
|
185 |
+
coco_format_train = coco_format_train.append(res)
|
186 |
+
coco_format_train.reset_index(drop=True,inplace=True)
|
187 |
+
|
188 |
+
coco_format_train.reset_index(inplace=True)
|
189 |
+
coco_format_train.rename(columns={"index":"image_id"},inplace=True)
|
190 |
+
coco_format_train.to_json(source + f"train_{args.exp}.json",orient="records")
|
191 |
+
|
192 |
+
coco_format_test = pd.DataFrame(columns=["file_name","height","width","annotations"])
|
193 |
+
coco_format_test["annotations"] = coco_format_test["annotations"].astype('object')
|
194 |
+
|
195 |
+
for txt_file in natsorted(os.listdir(annotations_test)):
|
196 |
+
#width = 31
|
197 |
+
text_file = open(annotations_test + txt_file, 'r')
|
198 |
+
xy_coords = text_file.readlines()
|
199 |
+
boxes = []
|
200 |
+
res=pd.DataFrame(columns=["file_name","height","width","annotations"])
|
201 |
+
image = PIL.Image.open(images_test + txt_file[:-4] + ".tif").convert('L')
|
202 |
+
image_feature = PIL.Image.open(images_test + txt_file[:-4] + ".tif")
|
203 |
+
image = image_feature
|
204 |
+
#print(image.size)
|
205 |
+
res.at[0,"height"] = image.height
|
206 |
+
res.at[0,"width"] = image.width
|
207 |
+
res.at[0,"file_name"] = txt_file[:-4]+".tif"
|
208 |
+
bbox_mode = 0
|
209 |
+
category_id = 0
|
210 |
+
# image2 = image.resize((1024,1024))
|
211 |
+
# image2.save(images_resized_train + txt_file[:-4] + ".jpg")
|
212 |
+
for xy in xy_coords:
|
213 |
+
box = []
|
214 |
+
x = float(xy.split(" ")[0])
|
215 |
+
y = float(xy.split(" ")[1])
|
216 |
+
x1 = int(x*factor_w - (width // 2))
|
217 |
+
y1 = int(y*factor_h - (width // 2))
|
218 |
+
x2 = int(x*factor_w + (width // 2))
|
219 |
+
y2 = int(y*factor_h + (width // 2))
|
220 |
+
w = h = 31
|
221 |
+
box = [x1, y1, x2, y2]
|
222 |
+
boxes.append(np.array(box))
|
223 |
+
#print(np.array(box))
|
224 |
+
|
225 |
+
res["annotations"]=res["annotations"].astype('object')
|
226 |
+
annotation_df = pd.DataFrame(columns=["bbox","bbox_mode","category_id"])
|
227 |
+
annotation_df["bbox"] = boxes
|
228 |
+
annotation_df["bbox_mode"] = bbox_mode
|
229 |
+
annotation_df["category_id"] = category_id
|
230 |
+
annotations = annotation_df.T.to_dict().values()
|
231 |
+
l = []
|
232 |
+
for j in annotations:
|
233 |
+
l.append(j)
|
234 |
+
res.at[0,"annotations"] = l
|
235 |
+
coco_format_test = coco_format_test.append(res)
|
236 |
+
coco_format_test.reset_index(drop=True,inplace=True)
|
237 |
+
|
238 |
+
coco_format_test.reset_index(inplace=True)
|
239 |
+
coco_format_test.rename(columns={"index":"image_id"},inplace=True)
|
240 |
+
coco_format_test.to_json(source + f"val_{args.exp}.json",orient="records")
|
241 |
+
|
242 |
+
|
243 |
+
|
244 |
+
def get_board_dicts(imgdir, mode):
|
245 |
+
if mode == 'train':
|
246 |
+
json_file = imgdir+f"/train_{args.exp}.json" #Fetch the json file
|
247 |
+
if mode == 'val':
|
248 |
+
json_file = imgdir+f"/val_{args.exp}.json" #Fetch the json file
|
249 |
+
with open(json_file) as f:
|
250 |
+
dataset_dicts = json.load(f)
|
251 |
+
for i in dataset_dicts:
|
252 |
+
filename = i["file_name"]
|
253 |
+
if mode == 'train':
|
254 |
+
i["file_name"] = imgdir + train_images_path + filename
|
255 |
+
if mode == 'val':
|
256 |
+
i["file_name"] = imgdir + val_image_path + filename
|
257 |
+
for j in i["annotations"]:
|
258 |
+
j["bbox_mode"] = BoxMode.XYXY_ABS #Setting the required Box Mode
|
259 |
+
j["category_id"] = int(j["category_id"])
|
260 |
+
return dataset_dicts
|
261 |
+
|
262 |
+
#Registering the Dataset
|
263 |
+
for d in ["train", "val"]:
|
264 |
+
DatasetCatalog.register("boardetect_" + d, lambda d=d: get_board_dicts(source, d))
|
265 |
+
MetadataCatalog.get("boardetect_" + d).set(thing_classes=["node"])
|
266 |
+
board_metadata = MetadataCatalog.get("boardetect_train")
|
267 |
+
val_metadata = MetadataCatalog.get("boardetect_val")
|
268 |
+
|
269 |
+
train_data = ("boardetect_train",)
|
270 |
+
|
271 |
+
if val:
|
272 |
+
val_data = ("boardetect_val",)
|
273 |
+
else:
|
274 |
+
val_data = ("boardetect_train",)
|
275 |
+
|
276 |
+
|
277 |
+
|
278 |
+
class CocoTrainer(DefaultTrainer):
|
279 |
+
|
280 |
+
@classmethod
|
281 |
+
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
|
282 |
+
if output_folder is None:
|
283 |
+
os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
|
284 |
+
output_folder = cfg.OUTPUT_DIR
|
285 |
+
return COCOEvaluator(dataset_name, cfg, False, output_folder)
|
286 |
+
|
287 |
+
|
288 |
+
def build_hooks(self):
|
289 |
+
hooks = super().build_hooks()
|
290 |
+
hooks.insert(-1,LossEvalHook(
|
291 |
+
cfg.TEST.EVAL_PERIOD,
|
292 |
+
self.model,
|
293 |
+
build_detection_test_loader(
|
294 |
+
self.cfg,
|
295 |
+
self.cfg.DATASETS.TEST[0],
|
296 |
+
DatasetMapper(self.cfg,True)
|
297 |
+
)
|
298 |
+
))
|
299 |
+
return hooks
|
300 |
+
|
301 |
+
cfg = get_cfg()
|
302 |
+
cfg.MODEL.DEVICE = f"cuda:{args.cuda}"
|
303 |
+
cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/retinanet_R_101_FPN_3x.yaml")) #Get the basic model configuration from the model zoo
|
304 |
+
#Passing the Train and Validation sets
|
305 |
+
cfg.DATASETS.TRAIN = train_data #("boardetect_train",)
|
306 |
+
cfg.DATASETS.TEST = val_data #("boardetect_train",)
|
307 |
+
cfg.OUTPUT_DIR = output_dir #("comparison-optical-flow")
|
308 |
+
# Number of data loading threads
|
309 |
+
cfg.DATALOADER.NUM_WORKERS = 4
|
310 |
+
#cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml") # Let training initialize from model zoo
|
311 |
+
#cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth") #uncomment during inference
|
312 |
+
#cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth") #uncomment during inference
|
313 |
+
#cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_00044999.pth") #uncomment during inference
|
314 |
+
# Number of images per batch across all machines.
|
315 |
+
cfg.SOLVER.IMS_PER_BATCH = 4
|
316 |
+
cfg.SOLVER.BASE_LR = float(args.lr) #0.00125 # pick a good LearningRate
|
317 |
+
cfg.SOLVER.MAX_ITER = int(args.epochs) #90000 #No. of iterations
|
318 |
+
# cfg.SOLVER.STEPS = (300,600)
|
319 |
+
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 80
|
320 |
+
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
|
321 |
+
cfg.TEST.EVAL_PERIOD = 500 # No. of iterations after which the Validation Set is evaluated.
|
322 |
+
cfg.TEST.DETECTIONS_PER_IMAGE = 60
|
323 |
+
cfg.SOLVER.CHECKPOINT_PERIOD = 500
|
324 |
+
cfg.VIS_PERIOD = 500
|
325 |
+
cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN = 100
|
326 |
+
cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST = 100
|
327 |
+
cfg.MODEL.RPN.POST_NMS_TOPK_TRAIN = 80 #
|
328 |
+
cfg.MODEL.RPN.POST_NMS_TOPK_TEST = 80
|
329 |
+
os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
|
330 |
+
trainer = CocoTrainer(cfg)
|
331 |
+
trainer.resume_or_load(resume=True) #resume=True for inference
|
332 |
+
trainer.train()
|
333 |
+
print(cfg.MODEL.WEIGHTS)
|
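Note: the commented-out weight assignments above mark where trained weights are swapped in for inference; a minimal sketch of that step (assumptions: the cfg object built by this script, a trained model_final.pth in the output directory, and a hypothetical test image path):

# Load the trained RetinaNet and run it on a single feature image.
from detectron2.engine import DefaultPredictor   # already imported at the top of the script
cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
cfg.MODEL.RETINANET.SCORE_THRESH_TEST = 0.3      # assumption: detection confidence cutoff
predictor = DefaultPredictor(cfg)
outputs = predictor(cv2.imread("DataFeatures/sample_frame.tif"))  # hypothetical path
print(outputs["instances"].pred_boxes, outputs["instances"].scores)
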
MEMTrack/todo.txt
ADDED
@@ -0,0 +1,7 @@
1 |
+
1. Update low to no motility and wiggle to low
|
2 |
+
2. add instructions for feature generation code
|
3 |
+
3. update feature generation code
|
4 |
+
4. update training scripts, add instructions for that
|
5 |
+
5. add inference code, instructions for that
|
6 |
+
6. inference on other data
|
7 |
+
7.
|
app.py
CHANGED
@@ -1,166 +1,43 @@
1 |
import cv2
|
2 |
-
import gradio as gr
|
3 |
-
import numpy as np
|
4 |
import os
|
|
|
5 |
import shutil
|
6 |
-
|
7 |
-
|
8 |
-
#
|
9 |
-
# # Create a directory to store frames
|
10 |
-
# frames_dir = Path("frames")
|
11 |
-
# frames_dir.mkdir(exist_ok=True)
|
12 |
-
|
13 |
-
# # Read the video
|
14 |
-
# cap = cv2.VideoCapture(input_video)
|
15 |
-
# count = 0
|
16 |
-
# frames = []
|
17 |
-
|
18 |
-
# while True:
|
19 |
-
# ret, frame = cap.read()
|
20 |
-
# if not ret:
|
21 |
-
# break
|
22 |
-
# frame_path = str(frames_dir / f"frame_{count}.jpg")
|
23 |
-
# cv2.imwrite(frame_path, frame)
|
24 |
-
# frames.append(frame)
|
25 |
-
# count += 1
|
26 |
-
|
27 |
-
# # Close video file
|
28 |
-
# cap.release()
|
29 |
-
|
30 |
-
# # Process the first and the last frame (simple example: convert to grayscale)
|
31 |
-
# if frames:
|
32 |
-
# frames[0] = cv2.cvtColor(frames[0], cv2.COLOR_BGR2GRAY)
|
33 |
-
# frames[-1] = cv2.cvtColor(frames[-1], cv2.COLOR_BGR2GRAY)
|
34 |
-
|
35 |
-
# # Create a processed video
|
36 |
-
# output_video_path = "processed_video.mp4"
|
37 |
-
# height, width = frames[0].shape[:2]
|
38 |
-
# fourcc = cv2.VideoWriter_fourcc(*'mp4v') # or use 'XVID' if mp4 does not work
|
39 |
-
# out = cv2.VideoWriter(output_video_path, fourcc, 60.0, (width, height), isColor=len(frames[0].shape) > 2)
|
40 |
-
|
41 |
-
# for frame in frames:
|
42 |
-
# out.write(frame)
|
43 |
-
|
44 |
-
# out.release()
|
45 |
-
# return output_video_path
|
46 |
-
|
47 |
-
|
48 |
-
def process_video(input_video):
|
49 |
-
# Create a directory to store difference frames
|
50 |
-
diff_frames_dir = Path("diff_frames")
|
51 |
-
if os.path.exists(diff_frames_dir):
|
52 |
-
shutil.rmtree(diff_frames_dir)
|
53 |
-
diff_frames_dir.mkdir(exist_ok=True)
|
54 |
-
|
55 |
-
frames_dir = Path("frames")
|
56 |
-
if os.path.exists(frames_dir):
|
57 |
-
shutil.rmtree(frames_dir)
|
58 |
-
frames_dir.mkdir(exist_ok=True)
|
59 |
-
|
60 |
-
# Read the video
|
61 |
-
cap = cv2.VideoCapture(input_video)
|
62 |
-
count = 0
|
63 |
-
diff_frames = []
|
64 |
-
prev_frame = None
|
65 |
-
|
66 |
-
while True:
|
67 |
-
ret, frame = cap.read()
|
68 |
-
if not ret:
|
69 |
-
break
|
70 |
-
|
71 |
-
# Convert frame to grayscale
|
72 |
-
gray_frame = frame #cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
|
73 |
-
|
74 |
-
frame_path = str(frames_dir / f"frame_{count}.jpg")
|
75 |
-
cv2.imwrite(frame_path, frame)
|
76 |
-
|
77 |
-
if prev_frame is not None:
|
78 |
-
# Calculate difference with the previous frame
|
79 |
-
diff = cv2.absdiff(prev_frame, gray_frame)
|
80 |
-
diff_frames.append(gray_frame)
|
81 |
-
|
82 |
-
# Save difference frame
|
83 |
-
diff_frame_path = str(diff_frames_dir / f"diff_frame_{count}.jpg")
|
84 |
-
cv2.imwrite(diff_frame_path, diff)
|
85 |
-
else:
|
86 |
-
# For the first frame, there is no previous frame to compare
|
87 |
-
diff_frames.append(gray_frame)
|
88 |
-
|
89 |
-
prev_frame = gray_frame
|
90 |
-
count += 1
|
91 |
-
fps = cap.get(cv2.CAP_PROP_FPS)
|
92 |
-
print(fps)
|
93 |
-
size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
|
94 |
-
print(size)
|
95 |
-
# Close video file
|
96 |
-
cap.release()
|
97 |
-
|
98 |
-
# Create a video from the difference frames
|
99 |
-
output_video_path = "diff_video.mp4"
|
100 |
-
if diff_frames:
|
101 |
-
print("Here")
|
102 |
-
height, width = diff_frames[0].shape[:2]
|
103 |
-
fourcc = cv2.VideoWriter_fourcc(*"mp4v") # Use 'mp4v' for MP4 or 'XVID' for AVI
|
104 |
-
out = cv2.VideoWriter(output_video_path, fourcc, fps, size, isColor=True)
|
105 |
-
|
106 |
-
for diff_frame in diff_frames:
|
107 |
-
# Need to convert single channel image to three channels to write with VideoWriter
|
108 |
-
# three_channel_frame = cv2.cvtColor(diff_frame, cv2.COLOR_GRAY2BGR)
|
109 |
-
out.write(diff_frame)
|
110 |
-
|
111 |
-
out.release()
|
112 |
-
|
113 |
-
return output_video_path
|
114 |
-
|
115 |
-
def show_preds_video(input_video):
|
116 |
-
cap = cv2.VideoCapture(input_video)
|
117 |
-
while(cap.isOpened()):
|
118 |
-
ret, frame = cap.read()
|
119 |
-
if ret:
|
120 |
-
frame_copy = frame.copy()
|
121 |
-
|
122 |
-
yield frame_copy
|
123 |
-
|
124 |
-
|
125 |
-
# # Define Gradio Interface
|
126 |
-
# iface = gr.Interface(
|
127 |
-
# fn=process_video,
|
128 |
-
# inputs=gr.Video(label="Upload a Video"),
|
129 |
-
# outputs=gr.Video(label="Processed Video"),
|
130 |
-
# title="Video Frame Processor",
|
131 |
-
# description="Upload a video to split it into frames, process the first and last frame, and return the video."
|
132 |
-
# )
|
133 |
-
|
134 |
-
# # Define Gradio Interface
|
135 |
-
# iface = gr.Interface(
|
136 |
-
# fn=show_preds_video,
|
137 |
-
# inputs=gr.Video(label="Upload a Video"),
|
138 |
-
# outputs=[gr.components.Image(type="numpy", label="Output Image")],
|
139 |
-
# cache_examples=False,
|
140 |
-
# title="Video Frame Processor",
|
141 |
-
# description="Upload a video to split it into frames, process the first and last frame, and return the video."
|
142 |
-
# )
|
143 |
-
|
144 |
-
|
145 |
-
# if __name__ == "__main__":
|
146 |
-
# iface.launch(share=True)
|
147 |
-
|
148 |
-
|
149 |
-
# # import spaces
|
150 |
-
import gradio as gr
|
151 |
-
import cv2
|
152 |
-
import numpy as np
|
153 |
import time
|
154 |
import random
|
|
|
|
|
155 |
from PIL import Image
|
156 |
-
|
157 |
-
|
158 |
-
|
159 |
-
|
160 |
cap = cv2.VideoCapture(video)
|
161 |
total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) # Get total frames
|
162 |
-
writer = None
|
163 |
-
tmpname = random.randint(111111111, 999999999)
|
164 |
processed_frames = 0
|
165 |
|
166 |
while cap.isOpened():
|
@@ -169,28 +46,133 @@ def doo(video, mode, progress=gr.Progress()):
|
|
169 |
if ret is False:
|
170 |
break
|
171 |
|
172 |
-
frame = cv2.cvtColor(frame, cv2.
|
173 |
-
|
174 |
-
|
175 |
-
|
176 |
-
|
177 |
-
|
178 |
processed_frames += 1
|
179 |
print(f"Processing frame {processed_frames}")
|
180 |
-
progress(processed_frames / total_frames, desc=f"
|
181 |
-
|
182 |
-
writer.write(cv2.cvtColor(np.array(out), cv2.COLOR_BGR2RGB))
|
183 |
|
184 |
cap.release()
|
185 |
-
|
186 |
-
|
|
|
|
|
|
|
|
|
187 |
|
188 |
-
|
189 |
|
190 |
iface = gr.Interface(
|
191 |
fn=doo,
|
192 |
-
inputs="
|
193 |
-
outputs=
|
194 |
-
|
|
|
|
|
|
|
|
|
|
|
195 |
)
|
196 |
-
|
1 |
+
import os
|
2 |
+
import sys
|
3 |
import cv2
|
|
|
|
|
4 |
import os
|
5 |
+
import glob
|
6 |
import shutil
|
7 |
+
import gdown
|
8 |
+
import zipfile
|
9 |
+
# import spaces
|
10 |
import time
|
11 |
import random
|
12 |
+
import gradio as gr
|
13 |
+
import numpy as np
|
14 |
from PIL import Image
|
15 |
+
from pathlib import Path
|
16 |
+
sys.path.insert(1, "MEMTrack/src")
|
17 |
+
from data_prep_utils import process_data
|
18 |
+
from data_feature_gen import create_train_data, create_test_data
|
19 |
+
from inferenceBacteriaRetinanet_Motility_v2 import run_inference
|
20 |
+
from GenerateTrackingData import gen_tracking_data
|
21 |
+
from Tracking import track_bacteria
|
22 |
+
from TrackingAnalysis import analyse_tracking
|
23 |
+
from GenerateVideo import gen_tracking_video
|
24 |
+
|
25 |
+
def find_and_return_csv_files(folder_path, search_pattern):
|
26 |
+
search_pattern = f"{folder_path}/{search_pattern}*.csv"
|
27 |
+
csv_files = list(glob.glob(search_pattern))
|
28 |
+
return csv_files
|
29 |
+
|
30 |
+
def read_video(video, raw_frame_dir, progress=gr.Progress()):
|
31 |
+
# read video and save frames
|
32 |
+
video_dir = str(random.randint(111111111, 999999999))
|
33 |
+
images_dir = "Images without Labels"
|
34 |
+
frames_dir = os.path.join(raw_frame_dir, video_dir, images_dir)
|
35 |
+
os.makedirs(frames_dir, exist_ok=True)
|
36 |
+
count = 0
|
37 |
+
frames = []
|
38 |
+
|
39 |
cap = cv2.VideoCapture(video)
|
40 |
total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) # Get total frames
|
|
|
|
|
41 |
processed_frames = 0
|
42 |
|
43 |
while cap.isOpened():
|
|
|
46 |
if ret is False:
|
47 |
break
|
48 |
|
49 |
+
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
|
50 |
+
frame_path = os.path.join(frames_dir, f"{count}.jpg")
|
51 |
+
cv2.imwrite(frame_path, frame)
|
52 |
+
frames.append(frame)
|
53 |
+
count += 1
|
|
|
54 |
processed_frames += 1
|
55 |
print(f"Processing frame {processed_frames}")
|
56 |
+
progress(processed_frames / total_frames, desc=f"Reading frame {processed_frames}/{total_frames}")
|
57 |
+
|
|
|
58 |
|
59 |
cap.release()
|
60 |
+
return video_dir
|
61 |
+
|
62 |
+
def download_and_unzip_google_drive_file(file_id, output_path, unzip_path):
|
63 |
+
url = f'https://drive.google.com/uc?id={file_id}'
|
64 |
+
url="https://drive.usercontent.google.com/download?id=1agsLD5HV_VmDNpDhjHXTCAVmGUm2IQ6p&export=download&&confirm=t"
|
65 |
+
gdown.download(url, output_path, quiet=False, )
|
66 |
|
67 |
+
with zipfile.ZipFile(output_path, 'r') as zip_ref:
|
68 |
+
zip_ref.extractall(unzip_path)
|
69 |
+
|
70 |
+
|
71 |
+
# @spaces.GPU()
|
72 |
+
def doo(video, progress=gr.Progress()):
|
73 |
+
# download and unzip models
|
74 |
+
file_id = '1agsLD5HV_VmDNpDhjHXTCAVmGUm2IQ6p'
|
75 |
+
output_path = 'models.zip'
|
76 |
+
unzip_path = './'
|
77 |
+
|
78 |
+
download_and_unzip_google_drive_file(file_id, output_path, unzip_path)
|
79 |
+
|
80 |
+
# Initialize paths and variables
|
81 |
+
raw_frame_dir = "raw_data/" # Path to raw videos before processing (same format as sample data)
|
82 |
+
final_data_dir = "data" # root directory to store processed videos
|
83 |
+
out_sub_dir = "bacteria" # sub directory to store processed videos
|
84 |
+
target_data_sub_dir = os.path.join(final_data_dir, out_sub_dir)
|
85 |
+
feature_dir = "DataFeatures" # directory to store generated features
|
86 |
+
test_video_list = ["video1"] # list of videos to generate features for
|
87 |
+
exp_name = "collagen_motility_inference" # name of experiment
|
88 |
+
feature_data_path = os.path.join(feature_dir, exp_name)
|
89 |
+
|
90 |
+
|
91 |
+
# #path to saved models
|
92 |
+
# no_motility_model_path = "models/motility/no/collagen_optical_flow_median_bkg_more_data_90k/"
|
93 |
+
# low_motility_model_path = "models/motility/low/collagen_optical_flow_median_bkg_more_data_90k/"
|
94 |
+
# mid_motility_model_path = "models/motility/mid/collagen_optical_flow_median_bkg_more_data_90k/"
|
95 |
+
# high_motility_model_path = "models/motility/high/collagen_optical_flow_median_bkg_more_data_90k/"
|
96 |
+
|
97 |
+
|
98 |
+
# # Clear previous results and data
|
99 |
+
# if os.path.exists(final_data_dir):
|
100 |
+
# shutil.rmtree(final_data_dir)
|
101 |
+
# if os.path.exists(raw_frame_dir):
|
102 |
+
# shutil.rmtree(raw_frame_dir)
|
103 |
+
# if os.path.exists(feature_dir):
|
104 |
+
# shutil.rmtree(feature_dir)
|
105 |
+
|
106 |
+
# # Read video and store frames separately for object detection model
|
107 |
+
# video_dir = read_video(video, raw_frame_dir, progress=gr.Progress())
|
108 |
+
# # Process raw frames and store in acceptable format
|
109 |
+
# progress(1 / 3, desc=f"Processing Frames {1}/{3}")
|
110 |
+
# video_num = process_data(video_dir, raw_frame_dir, final_data_dir, out_sub_dir)
|
111 |
+
# progress(3 / 3, desc=f"Processing Frames {3}/{3}")
|
112 |
+
# # generate features for raw frames for the object detector model
|
113 |
+
# progress(1 / 3, desc=f"Generating Features {1}/{3}")
|
114 |
+
# create_test_data(target_data_sub_dir, feature_dir, exp_name, test_video_list)
|
115 |
+
# progress(3 / 3, desc=f"Features Generated {3}/{3}")
|
116 |
+
|
117 |
+
|
118 |
+
# progress(1 / 3, desc=f"Loading Models {1}/{3}")
|
119 |
+
# # Run Object Detection Code
|
120 |
+
# for video_num in [1]:
|
121 |
+
# #To generate testing files for all motilities
|
122 |
+
# run_inference(video_num=video_num, output_dir=no_motility_model_path,
|
123 |
+
# annotations_test="All", test_dir=feature_data_path, register_dataset=True)
|
124 |
+
# progress(3 / 3, desc=f"Models Loaded{3}/{3}")
|
125 |
+
# run_inference(video_num=video_num, output_dir=mid_motility_model_path,
|
126 |
+
# annotations_test="Motility-mid", test_dir=feature_data_path, register_dataset=False)
|
127 |
+
# progress(1 / 3, desc=f"Running Bacteria Detection {1}/{3}")
|
128 |
+
|
129 |
+
# run_inference(video_num=video_num, output_dir=high_motility_model_path,
|
130 |
+
# annotations_test="Motility-high", test_dir=feature_data_path, register_dataset=False)
|
131 |
+
# progress(2 / 3, desc=f"Running Bacteria Detection {2}/{3}")
|
132 |
+
|
133 |
+
# run_inference(video_num=video_num, output_dir=low_motility_model_path,
|
134 |
+
# annotations_test="Motility-low", test_dir=feature_data_path, register_dataset=False)
|
135 |
+
# progress(3 / 3, desc=f"Running Bacteria Detection {3}/{3}")
|
136 |
+
|
137 |
+
# # Tracking where GT is present
|
138 |
+
# progress(0 / 3, desc=f"Tracking {0}/{3}")
|
139 |
+
for video_num in [1]:
|
140 |
+
# gen_tracking_data(video_num=video_num, data_path=feature_data_path, filter_thresh=0.3)
|
141 |
+
# progress(1 / 3, desc=f"Tracking {1}/{3}")
|
142 |
+
# track_bacteria(video_num=video_num, max_age=35, max_interpolation=35, data_path=feature_data_path)
|
143 |
+
# progress(2 / 3, desc=f"Tracking {2}/{3}")
|
144 |
+
folder_path = analyse_tracking(video_num=video_num, data_feature_path=feature_data_path, data_root_path=final_data_dir, plot=True)
|
145 |
+
progress(3 / 3, desc=f"Tracking {3}/{3}")
|
146 |
+
output_video = gen_tracking_video(video_num=video_num, fps=60, data_path=feature_data_path)
|
147 |
+
final_video = os.path.basename(output_video)
|
148 |
+
shutil.copy(output_video, final_video)
|
149 |
+
print(output_video)
|
150 |
+
print(final_video)
|
151 |
+
|
152 |
+
search_pattern = "TrackedRawData"
|
153 |
+
tracking_preds = find_and_return_csv_files(folder_path, search_pattern)
|
154 |
+
|
155 |
+
|
156 |
+
return final_video, tracking_preds #str(tmpname) + '.mp4'
|
157 |
+
|
158 |
+
|
159 |
+
examples = [['./sample_videos/control_4_h264.mp4']]
|
160 |
+
|
161 |
+
title = "🎞️ MEMTrack Bacteria Tracking Video Tool"
|
162 |
+
description = "Upload a video or select one of the examples to track. <br><br> If the input video does not play in the browser, ensure it is in a browser-acceptable format. Output will be generated irrespective of playback in the browser. Refer: https://colab.research.google.com/drive/1U5pX_9iaR_T8knVV7o4ftKdDoGndCdEM?usp=sharing"
|
163 |
|
164 |
iface = gr.Interface(
|
165 |
fn=doo,
|
166 |
+
inputs=gr.Video(label="Input Video"),
|
167 |
+
outputs=[
|
168 |
+
gr.Video(label="Tracked Video"),
|
169 |
+
gr.File(label="CSV Data")
|
170 |
+
],
|
171 |
+
examples=examples,
|
172 |
+
title=title,
|
173 |
+
description=description
|
174 |
)
|
175 |
+
|
176 |
+
|
177 |
+
if __name__ == "__main__":
|
178 |
+
iface.launch(share=True)
|
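Note: the commented-out calls inside doo() trace the full intended pipeline; condensed into one sequence (same functions and argument values as the commented block, model path taken from the commented no-motility example), it reads roughly as the sketch below. In the committed app.py only the model download, tracking analysis, video generation, and CSV collection steps are left uncommented.

# Sketch of the pipeline doo() wires together: the uploaded video is split into
# frames, converted to features, run through detection, then tracked and rendered.
video_num = 1
video_dir = read_video(video, raw_frame_dir)
process_data(video_dir, raw_frame_dir, final_data_dir, out_sub_dir)
create_test_data(target_data_sub_dir, feature_dir, exp_name, test_video_list)
run_inference(video_num=video_num,
              output_dir="models/motility/no/collagen_optical_flow_median_bkg_more_data_90k/",
              annotations_test="All", test_dir=feature_data_path, register_dataset=True)
# (the commented block repeats run_inference for the mid, high, and low motility models)
gen_tracking_data(video_num=video_num, data_path=feature_data_path, filter_thresh=0.3)
track_bacteria(video_num=video_num, max_age=35, max_interpolation=35, data_path=feature_data_path)
folder_path = analyse_tracking(video_num=video_num, data_feature_path=feature_data_path,
                               data_root_path=final_data_dir, plot=True)
output_video = gen_tracking_video(video_num=video_num, fps=60, data_path=feature_data_path)
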
requirements.txt
CHANGED
@@ -2,4 +2,38 @@ gradio
|
|
2 |
torch
|
3 |
torchvision
|
4 |
opencv-python
|
5 |
-
tqdm
|
2 |
torch
|
3 |
torchvision
|
4 |
opencv-python
|
5 |
+
tqdm
|
6 |
+
pip==22.0.3
|
7 |
+
numpy
|
8 |
+
filterpy==1.4.5
|
9 |
+
json5==0.9.6
|
10 |
+
jsonlines==3.0.0
|
11 |
+
jsonpatch==1.32
|
12 |
+
jsonpointer==2.1
|
13 |
+
jsonschema==3.2.0
|
14 |
+
jupyter-client==7.0.3
|
15 |
+
jupyter-core==4.8.1
|
16 |
+
jupyter-server==1.11.0
|
17 |
+
jupyterlab==3.1.13
|
18 |
+
jupyterlab-pygments==0.1.2
|
19 |
+
jupyterlab-server==2.8.1
|
20 |
+
jupyterlab-widgets==1.0.2
|
21 |
+
lap==0.4.0
|
22 |
+
matplotlib==3.3.4
|
23 |
+
motmetrics==1.2.5
|
24 |
+
natsort==7.1.1
|
25 |
+
numpy==1.19.5
|
26 |
+
opencv-python==4.5.5.64
|
27 |
+
opencv-python-headless==4.5.4.60
|
28 |
+
openpyxl
|
29 |
+
pandas==1.1.5
|
30 |
+
plotly==5.11.0
|
31 |
+
scikit-image==0.17.2
|
32 |
+
scikit-learn==0.24.2
|
33 |
+
scipy==1.5.4
|
34 |
+
seaborn==0.11.2
|
35 |
+
torch==1.9.1
|
36 |
+
torchfile==0.1.0
|
37 |
+
torchmetrics==0.5.1
|
38 |
+
torchvision==0.10.1
|
39 |
+
tqdm==4.62.3
|