replace maskrcnn-benchmark nms with torchvision nms

Zhongdao 2020-01-09 22:48:17 +08:00
parent 1cb8cee836
commit be116014d6
46 changed files with 112372 additions and 111016 deletions
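For context, the change swaps the compiled GPU NMS kernel from maskrcnn-benchmark for torchvision's built-in operator. A minimal sketch of the torchvision call (the wrapper name and the 0.4 threshold are illustrative, not taken from this commit):

```python
import torch
from torchvision.ops import nms  # works on CPU and CUDA tensors alike

def suppress(dets, iou_thres=0.4):
    """dets: (N, 5) tensor of [x1, y1, x2, y2, score]; returns the kept rows."""
    boxes, scores = dets[:, :4], dets[:, 4]
    keep = nms(boxes, scores, iou_thres)  # indices of kept boxes, highest score first
    return dets[keep]
```

Since the operator ships with torchvision, no separate compiled extension is needed for NMS.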

.gitignore

@@ -1,112 +1,112 @@
results/
weights/
data/mot17
data/mot19
tmp/
external/
=======
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
!utils/*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# pyenv
.python-version
# celery beat schedule file
celerybeat-schedule
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/

DATASET_ZOO.md

@@ -1,176 +1,176 @@
# Dataset Zoo
We provide several relevant datasets for training and evaluating the Joint Detection and Embedding (JDE) model.
Annotations are provided in a unified format. If you want to use these datasets, please **follow their licenses**,
and if you use these datasets in your research, please cite the original work (you can find the BibTeX at the bottom).
## Data Format
All the datasets have the following structure:
```
Caltech
|——————images
| └——————00001.jpg
| |—————— ...
| └——————0000N.jpg
└——————labels_with_ids
└——————00001.txt
|—————— ...
└——————0000N.txt
```
Every image corresponds to an annotation text file. Given an image path,
the annotation text path can be easily generated by replacing the string `images` with `labels_with_ids` and replacing `.jpg` with `.txt`.
In the annotation text, each line describes a bounding box in the following format:
```
[class] [identity] [x_center] [y_center] [width] [height]
```
The field `[class]` is not used in this project since we only care about a single class, i.e., pedestrians.
The field `[identity]` is an integer from `0` to `num_identities - 1`, or `-1` if this box has no identity annotation.
**Note** that the values of `[x_center] [y_center] [width] [height]` are normalized by the width/height of the image, so they are floating-point numbers ranging from 0 to 1.
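As a concrete illustration of the two conventions above (path mapping and normalized coordinates), here is a minimal sketch; the helper names are ours, not part of the dataset tooling:

```python
def label_path(img_path: str) -> str:
    # images -> labels_with_ids, .jpg -> .txt
    return img_path.replace('images', 'labels_with_ids').replace('.jpg', '.txt')

def parse_line(line: str, img_w: int, img_h: int):
    # [class] [identity] [x_center] [y_center] [width] [height], box values normalized to [0, 1]
    cls, tid, xc, yc, w, h = line.split()
    return int(cls), int(tid), float(xc) * img_w, float(yc) * img_h, float(w) * img_w, float(h) * img_h
```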
## Download
### Caltech Pedestrian
Baidu NetDisk:
[[0]](https://pan.baidu.com/s/1sYBXXvQaXZ8TuNwQxMcAgg)
[[1]](https://pan.baidu.com/s/1lVO7YBzagex1xlzqPksaPw)
[[2]](https://pan.baidu.com/s/1PZXxxy_lrswaqTVg0GuHWg)
[[3]](https://pan.baidu.com/s/1M93NCo_E6naeYPpykmaNgA)
[[4]](https://pan.baidu.com/s/1ZXCdPNXfwbxQ4xCbVu5Dtw)
[[5]](https://pan.baidu.com/s/1kcZkh1tcEiBEJqnDtYuejg)
[[6]](https://pan.baidu.com/s/1sDjhtgdFrzR60KKxSjNb2A)
[[7]](https://pan.baidu.com/s/18Zvp_d33qj1pmutFDUbJyw)
Google Drive: [[annotation]](https://drive.google.com/file/d/1h8vxl_6tgi9QVYoer9XcY9YwNB32TE5k/view?usp=sharing),
please download all the `.tar` files from [this page](http://www.vision.caltech.edu/Image_Datasets/CaltechPedestrians/datasets/USA/) and extract the images under `Caltech/images`.
Original dataset webpage: [CaltechPedestrians](http://www.vision.caltech.edu/Image_Datasets/CaltechPedestrians/)
### CityPersons
Baidu NetDisk:
[[0]](https://pan.baidu.com/s/1g24doGOdkKqmbgbJf03vsw)
[[1]](https://pan.baidu.com/s/1mqDF9M5MdD3MGxSfe0ENsA)
[[2]](https://pan.baidu.com/s/1Qrbh9lQUaEORCIlfI25wdA)
[[3]](https://pan.baidu.com/s/1lw7shaffBgARDuk8mkkHhw)
Google Drive:
[[0]](https://drive.google.com/file/d/1DgLHqEkQUOj63mCrS_0UGFEM9BG8sIZs/view?usp=sharing)
[[1]](https://drive.google.com/file/d/1BH9Xz59UImIGUdYwUR-cnP1g7Ton_LcZ/view?usp=sharing)
[[2]](https://drive.google.com/file/d/1q_OltirP68YFvRWgYkBHLEFSUayjkKYE/view?usp=sharing)
[[3]](https://drive.google.com/file/d/1VSL0SFoQxPXnIdBamOZJzHrHJ1N2gsTW/view?usp=sharing)
Original dataset webpage: [Citypersons pedestrian detection dataset](https://bitbucket.org/shanshanzhang/citypersons)
### CUHK-SYSU
Baidu NetDisk:
[[0]](https://pan.baidu.com/s/1YFrlyB1WjcQmFW3Vt_sEaQ)
Google Drive:
[[0]](https://drive.google.com/file/d/1D7VL43kIV9uJrdSCYl53j89RE2K-IoQA/view?usp=sharing)
Original dataset webpage: [CUHK-SYSU Person Search Dataset](http://www.ee.cuhk.edu.hk/~xgwang/PS/dataset.html)
### PRW
Baidu NetDisk:
[[0]](https://pan.baidu.com/s/1iqOVKO57dL53OI1KOmWeGQ)
Google Drive:
[[0]](https://drive.google.com/file/d/116_mIdjgB-WJXGe8RYJDWxlFnc_4sqS8/view?usp=sharing)
Original dataset webpage: [Person Search in the Wild dataset](http://www.liangzheng.com.cn/Project/project_prw.html)
### ETHZ (videos overlapping with MOT-16 removed)
Baidu NetDisk:
[[0]](https://pan.baidu.com/s/14EauGb2nLrcB3GRSlQ4K9Q)
Google Drive:
[[0]](https://drive.google.com/file/d/19QyGOCqn8K_rc9TXJ8UwLSxCx17e0GoY/view?usp=sharing)
Original dataset webpage: [ETHZ pedestrian dataset](https://data.vision.ee.ethz.ch/cvl/aess/dataset/)
### MOT-17
Baidu NetDisk:
[[0]](https://pan.baidu.com/s/1lHa6UagcosRBz-_Y308GvQ)
Google Drive:
[[0]](https://drive.google.com/file/d/1ET-6w12yHNo8DKevOVgK1dBlYs739e_3/view?usp=sharing)
Original dataset webpage: [MOT-17](https://motchallenge.net/data/MOT17/)
### MOT-16 (for evaluation)
Baidu NetDisk:
[[0]](https://pan.baidu.com/s/10pUuB32Hro-h-KUZv8duiw)
Google Drive:
[[0]](https://drive.google.com/file/d/1254q3ruzBzgn4LUejDVsCtT05SIEieQg/view?usp=sharing)
Original dataset webpage: [MOT-16](https://motchallenge.net/data/MOT16/)
# Citation
Caltech:
```
@inproceedings{ dollarCVPR09peds,
author = "P. Doll\'ar and C. Wojek and B. Schiele and P. Perona",
title = "Pedestrian Detection: A Benchmark",
booktitle = "CVPR",
month = "June",
year = "2009",
city = "Miami",
}
```
Citypersons:
```
@INPROCEEDINGS{Shanshan2017CVPR,
Author = {Shanshan Zhang and Rodrigo Benenson and Bernt Schiele},
Title = {CityPersons: A Diverse Dataset for Pedestrian Detection},
Booktitle = {CVPR},
Year = {2017}
}
@INPROCEEDINGS{Cordts2016Cityscapes,
title={The Cityscapes Dataset for Semantic Urban Scene Understanding},
author={Cordts, Marius and Omran, Mohamed and Ramos, Sebastian and Rehfeld, Timo and Enzweiler, Markus and Benenson, Rodrigo and Franke, Uwe and Roth, Stefan and Schiele, Bernt},
booktitle={Proc. of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
year={2016}
}
```
CUHK-SYSU:
```
@inproceedings{xiaoli2017joint,
title={Joint Detection and Identification Feature Learning for Person Search},
author={Xiao, Tong and Li, Shuang and Wang, Bochao and Lin, Liang and Wang, Xiaogang},
booktitle={CVPR},
year={2017}
}
```
PRW:
```
@inproceedings{zheng2017person,
title={Person re-identification in the wild},
author={Zheng, Liang and Zhang, Hengheng and Sun, Shaoyan and Chandraker, Manmohan and Yang, Yi and Tian, Qi},
booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
pages={1367--1376},
year={2017}
}
```
ETHZ:
```
@InProceedings{eth_biwi_00534,
author = {A. Ess and B. Leibe and K. Schindler and L. van Gool},
title = {A Mobile Vision System for Robust Multi-Person Tracking},
booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR'08)},
year = {2008},
month = {June},
publisher = {IEEE Press},
keywords = {}
}
```
MOT-16&17:
```
@article{milan2016mot16,
title={MOT16: A benchmark for multi-object tracking},
author={Milan, Anton and Leal-Taix{\'e}, Laura and Reid, Ian and Roth, Stefan and Schindler, Konrad},
journal={arXiv preprint arXiv:1603.00831},
year={2016}
}
```

LICENSE

@@ -1,21 +1,21 @@
MIT License
Copyright (c) 2019 ZhongdaoWang
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

README.md

@@ -1,52 +1,52 @@
# Towards-Realtime-MOT
**NEWS:**
- **[2019.10.11]** Training and evaluation data uploaded! Please see [DATASET_ZOO.md](https://github.com/Zhongdao/Towards-Realtime-MOT/blob/master/DATASET_ZOO.md) for details.
- **[2019.10.01]** Demo code and pre-trained model released!
## Introduction
This repo is the codebase of the Joint Detection and Embedding (JDE) model. JDE is a fast and high-performance multiple-object tracker that learns the object detection task and the appearance embedding task simultaneously in a shared neural network. Technical details are described in our [arXiv preprint paper](https://arxiv.org/pdf/1909.12605v1.pdf). By using this repo, you can simply achieve **MOTA 64%+** on the "private" protocol of the [MOT-16 challenge](https://motchallenge.net/tracker/JDE), at a near real-time speed of **18~24 FPS** (note that this speed is for the entire system, including the detection step!).
We hope this repo will help researchers/engineers develop more practical MOT systems. For algorithm development, we provide training data, baseline models and evaluation methods to create a level playing field. For application usage, we also provide a small video demo that takes raw videos as input without any bells and whistles.
## Requirements
* Python 3.6
* [Pytorch](https://pytorch.org) >= 1.0.1
* [syncbn](https://github.com/ytoon/Synchronized-BatchNorm-PyTorch) (Optional, compile and place it under utils/syncbn, or simply replace with nn.BatchNorm [here](https://github.com/Zhongdao/Towards-Realtime-MOT/blob/master/models.py#L12))
* [maskrcnn-benchmark](https://github.com/facebookresearch/maskrcnn-benchmark) (Their GPU NMS is used in this project)
* python-opencv
* ffmpeg (Optional, used in the video demo)
* [py-motmetrics](https://github.com/cheind/py-motmetrics) (Simply `pip install motmetrics`)
## Video Demo
<img src="assets/MOT16-03.gif" width="400"/> <img src="assets/MOT16-14.gif" width="400"/>
<img src="assets/IMG_0055.gif" width="400"/> <img src="assets/000011-00001.gif" width="400"/>
Usage:
```
python demo.py --input-video path/to/your/input/video --weights path/to/model/weights
--output-format video --output-root path/to/output/root
```
## Dataset zoo
Please see [DATASET_ZOO.md](https://github.com/Zhongdao/Towards-Realtime-MOT/blob/master/DATASET_ZOO.md) for detailed description of the training/evaluation datasets.
## Pretrained model and baseline models
Darknet-53 ImageNet pretrained: [[DarkNet Official]](https://pjreddie.com/media/files/darknet53.conv.74)
JDE-1088x608-uncertainty: [[Google Drive]](https://drive.google.com/open?id=1nlnuYfGNuHWZztQHXwVZSL_FvfE551pA) [[Baidu NetDisk]](https://pan.baidu.com/s/1Ifgn0Y_JZE65_qSrQM2l-Q)
## Test on MOT-16 Challenge
## Training instruction
- Download the training datasets.
- Edit `cfg/ccmcpe.json` to configure the training/validation combinations. A dataset is represented by an image list; see `data/*.train` for examples.
- Run the training script:
```
CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 python train.py
```
We use 8x Nvidia Titan Xp GPUs to train the model, with a batch size of 32. You can adjust the batch size (and the learning rate together) according to how many GPUs you have. You can also train with a smaller image size, which brings faster inference; note that the image size should be a multiple of 32 (the down-sampling rate).
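A quick sketch of that bookkeeping; the linear learning-rate rule and the numeric values below are common conventions used for illustration, not settings prescribed by this repo:

```python
def scale_lr(base_lr, base_batch, batch):
    # scale the learning rate linearly with the batch size
    return base_lr * batch / base_batch

def snap_to_stride(size, stride=32):
    # round an image dimension down to the nearest multiple of the down-sampling rate
    return size - size % stride

print(scale_lr(0.01, 32, 8))   # 0.0025
print(snap_to_stride(900))     # 896
```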
### Train with custom datasets
Adding custom datasets is quite simple: all you need to do is organize your annotation files in the same format as our training sets. Please refer to [DATASET_ZOO.md](https://github.com/Zhongdao/Towards-Realtime-MOT/blob/master/DATASET_ZOO.md) for the dataset format.
## Acknowledgement
A large portion of the code is borrowed from [ultralytics/yolov3](https://github.com/ultralytics/yolov3) and [longcw/MOTDT](https://github.com/longcw/MOTDT), many thanks for their wonderful work!

cfg/ccmcpe.json

@@ -1,24 +1,26 @@
{
"root":"/home/wangzd/datasets/MOT",
"train":
{
"mot17":"./data/mot17.train",
"caltech":"./data/caltech.train",
"citypersons":"./data/citypersons.train",
"cuhksysu":"./data/cuhksysu.train",
"prw":"./data/prw.train",
"eth":"./data/eth.train"
},
"test_emb":
{
"caltech":"./data/caltech.10k.val",
"cuhksysu":"./data/cuhksysu.val",
"prw":"./data/prw.val"
},
"test":
{
"mot19":"./data/mot19.train",
"caltech":"./data/caltech.val",
"citypersons":"./data/citypersons.val"
}
}
{
"root":"/home/wangzd/datasets/MOT",
"train":
{
"mot17":"./data/mot17.train",
"caltech":"./data/caltech.train",
"citypersons":"./data/citypersons.train",
"cuhksysu":"./data/cuhksysu.train",
"prw":"./data/prw.train",
"eth":"./data/eth.train",
"03":"./data/mot16-03.test",
"01":"./data/mot16-01.test",
"14":"./data/mot16-14.test"
},
"test_emb":
{
"caltech":"./data/caltech.10k.val",
"cuhksysu":"./data/cuhksysu.val",
"prw":"./data/prw.val"
},
"test":
{
"caltech":"./data/caltech.val",
"citypersons":"./data/citypersons.val"
}
}
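For orientation, a minimal sketch (not the repo's actual loader) of how a config in this shape can be consumed: parse the JSON, then read each referenced image list and resolve the entries against `root`:

```python
import json
from pathlib import Path

with open('cfg/ccmcpe.json') as f:
    cfg = json.load(f)

root = Path(cfg['root'])
for name, list_file in cfg['train'].items():
    # each list file holds one relative image path per line
    with open(list_file) as lf:
        imgs = [root / line.strip() for line in lf if line.strip()]
    print(name, len(imgs), 'images')
```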

File diff suppressed because it is too large

cfg/yolov3_864x480.cfg (new file)

@@ -0,0 +1,833 @@
[net]
# Testing
#batch=1
#subdivisions=1
# Training
batch=16
subdivisions=1
width=480
height=864
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.001
burn_in=1000
max_batches = 500200
policy=steps
steps=400000,450000
scales=.1,.1
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
# Downsample
[convolutional]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=32
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=128
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=512
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
######################
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=24
activation=linear
######### embedding ###########
[route]
layers = -3
[convolutional]
size=3
stride=1
pad=1
filters=512
activation=linear
[route]
layers = -3, -1
###############################
[yolo]
mask = 8,9,10,11
anchors = 6,19, 9,27, 13,38, 18,54, 25,76, 36,107, 51,152, 71,215, 102,305, 143, 429, 203,508, 407,508
classes=1
num=12
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -7
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 61
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=24
activation=linear
######### embedding ###########
[route]
layers = -3
[convolutional]
size=3
stride=1
pad=1
filters=512
activation=linear
[route]
layers = -3, -1
###############################
[yolo]
mask = 4,5,6,7
anchors = 6,19, 9,27, 13,38, 18,54, 25,76, 36,107, 51,152, 71,215, 102,305, 143, 429, 203,508, 407,508
classes=1
num=12
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
[route]
layers = -7
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 36
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=24
activation=linear
######### embedding ###########
[route]
layers = -3
[convolutional]
size=3
stride=1
pad=1
filters=512
activation=linear
[route]
layers = -3, -1
###############################
[yolo]
mask = 0,1,2,3
anchors = 6,19, 9,27, 13,38, 18,54, 25,76, 36,107, 51,152, 71,215, 102,305, 143, 429, 203,508, 407,508
classes=1
num=12
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
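For the record, the `filters=24` in each convolution feeding a `[yolo]` layer follows the usual Darknet rule of anchors-per-scale times (4 box offsets + 1 objectness + classes); a one-line check under that assumption:

```python
masks_per_scale, num_classes = 4, 1       # each mask list above has 4 entries, classes=1
det_filters = masks_per_scale * (4 + 1 + num_classes)
assert det_filters == 24                  # matches filters=24 before each [yolo] layer
```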

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

data/citypersons.val

@@ -1,441 +1,441 @@
Cityscapes/images/val/frankfurt/frankfurt_000001_066574_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_067474_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_077092_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_028590_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_028335_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_050149_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_043395_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_059119_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_005543_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_010156_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_064130_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_001016_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_003025_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_071288_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_055062_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_012868_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_048196_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_044658_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_080391_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_080091_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_007365_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_013710_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_013942_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_007973_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_020693_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_016286_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_073088_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_066438_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_030067_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_067178_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_014480_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_073464_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_011810_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_005898_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_019854_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_055709_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_002512_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_007622_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_054077_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_060545_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_063045_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_032556_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_064305_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_049698_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_007857_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_012519_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_034816_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_032018_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_019969_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_025713_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_065617_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_017228_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_062016_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_009504_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_010763_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_030669_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_002646_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_001464_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_062396_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_008206_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_004327_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_075984_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_052120_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_020046_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_046504_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_078803_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_011835_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_038418_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_012699_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_031266_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_022254_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_068208_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_007285_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_039895_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_073243_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_038245_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_046779_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_025921_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_016005_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_044787_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_054415_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_033655_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_049209_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_061763_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_022797_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_028232_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_083852_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_058057_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_020287_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_012738_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_043564_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_005898_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_049770_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_065850_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_044525_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_009854_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_077434_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_062250_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_038645_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_057181_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_062509_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_020215_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_020321_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_016029_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_068063_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_017476_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_011162_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_011461_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_013240_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_079206_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_018113_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_066092_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_051807_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_058176_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_015676_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_018797_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_012870_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_064925_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_023235_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_010600_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_077233_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_067735_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_014221_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_021879_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_041664_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_003588_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_009561_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_062793_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_014565_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_083029_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_013067_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_009688_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_055387_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_023769_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_013016_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_083199_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_034047_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_049298_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_035144_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_008688_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_015328_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_024927_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_032942_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_027325_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_014741_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_009969_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_067295_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_012121_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_004859_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_015389_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_068682_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_021406_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_050686_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_000538_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_082087_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_057954_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_054219_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_003920_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_040732_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_038844_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_042733_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_037705_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_069633_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_005703_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_025512_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_002759_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_015091_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_080830_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_019607_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_009058_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_015768_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_042384_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_012009_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_068772_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_072155_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_058504_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_016273_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_060906_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_066832_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_009291_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_059642_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_064798_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_060422_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_001236_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_055172_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_005410_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_075296_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_005184_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_044413_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_002196_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_064651_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_014406_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_048355_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_051516_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_040575_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_017842_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_041074_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_057478_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_055306_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_062653_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_007407_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_048654_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_019698_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_049078_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_061682_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_055603_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_023369_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_067092_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_054884_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_058914_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_070099_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_000294_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_056580_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_032711_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_013382_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_010830_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_000576_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_020880_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_030310_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_011715_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_008200_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_052594_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_042098_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_011007_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_012038_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_001751_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_046126_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_053102_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_013496_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_046272_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_073911_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_021667_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_065160_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_059789_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_017101_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_044227_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_029600_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_008451_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_076502_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_029086_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_051737_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_031416_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_029236_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_066574_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_067474_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_077092_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_028590_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_028335_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_050149_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_043395_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_059119_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_005543_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_010156_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_064130_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_001016_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_003025_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_071288_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_055062_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_012868_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_048196_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_044658_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_080391_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_080091_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_007365_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_013710_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_013942_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_007973_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_020693_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_016286_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_073088_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_066438_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_030067_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_067178_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_014480_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_073464_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_011810_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_005898_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_019854_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_055709_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_002512_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_007622_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_054077_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_060545_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_063045_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_032556_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_064305_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_049698_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_007857_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_012519_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_034816_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_032018_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_019969_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_025713_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_065617_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_017228_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_062016_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_009504_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_010763_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_030669_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_002646_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_001464_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_062396_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_008206_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_004327_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_075984_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_052120_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_020046_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_046504_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_078803_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_011835_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_038418_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_012699_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_031266_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_022254_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_068208_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_007285_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_039895_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_073243_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_038245_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_046779_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_025921_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_016005_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_044787_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_054415_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_033655_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_049209_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_061763_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_022797_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_028232_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_083852_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_058057_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_020287_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_012738_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_043564_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_005898_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_049770_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_065850_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_044525_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_009854_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_077434_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_062250_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_038645_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_057181_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_062509_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_020215_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_020321_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_016029_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_068063_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_017476_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_011162_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_011461_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_013240_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_079206_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_018113_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_066092_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_051807_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_058176_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_015676_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_018797_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_012870_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_064925_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_023235_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_010600_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_077233_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_067735_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_014221_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_021879_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_041664_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_003588_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_009561_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_062793_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_014565_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_083029_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_013067_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_009688_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_055387_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_023769_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_013016_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_083199_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_034047_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_049298_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_035144_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_008688_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_015328_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_024927_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_032942_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_027325_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_014741_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_009969_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_067295_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_012121_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_004859_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_015389_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_068682_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_021406_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_050686_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_000538_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_082087_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_057954_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_054219_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_003920_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_040732_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_038844_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_042733_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_037705_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_069633_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_005703_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_025512_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_002759_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_015091_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_080830_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_019607_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_009058_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_015768_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_042384_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_012009_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_068772_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_072155_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_058504_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_016273_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_060906_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_066832_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_009291_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_059642_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_064798_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_060422_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_001236_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_055172_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_005410_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_075296_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_005184_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_044413_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_002196_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_064651_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_014406_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_048355_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_051516_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_040575_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_017842_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_041074_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_057478_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_055306_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_062653_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_007407_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_048654_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_019698_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_049078_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_061682_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_055603_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_023369_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_067092_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_054884_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_058914_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_070099_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_000294_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_056580_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_032711_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_013382_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_010830_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_000576_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_020880_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_030310_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_011715_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_008200_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_052594_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_042098_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_011007_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_012038_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_001751_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_046126_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_053102_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_013496_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_046272_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_073911_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_021667_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_065160_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_059789_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_017101_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_044227_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_029600_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_008451_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_076502_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_029086_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_051737_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_031416_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_029236_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_004736_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_047178_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_010444_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_016462_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_028854_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_055538_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_021825_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000000_011074_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_071781_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_017459_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_054640_leftImg8bit.png
Cityscapes/images/val/frankfurt/frankfurt_000001_035864_leftImg8bit.png
Cityscapes/images/val/munster/munster_000129_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000011_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000044_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000165_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000078_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000014_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000086_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000067_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000097_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000077_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000138_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000058_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000030_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000083_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000085_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000036_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000026_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000068_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000064_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000024_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000135_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000120_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000041_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000169_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000144_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000049_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000062_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000048_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000154_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000053_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000022_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000076_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000040_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000032_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000163_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000149_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000094_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000146_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000084_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000000_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000092_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000109_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000019_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000020_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000089_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000153_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000152_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000066_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000131_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000035_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000151_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000052_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000105_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000001_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000108_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000159_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000073_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000055_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000106_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000136_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000050_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000140_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000147_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000096_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000166_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000070_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000133_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000171_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000056_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000134_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000162_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000143_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000150_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000002_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000160_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000009_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000003_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000054_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000170_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000095_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000141_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000006_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000126_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000099_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000071_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000148_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000128_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000114_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000018_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000130_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000113_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000063_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000157_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000060_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000116_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000028_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000075_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000158_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000155_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000102_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000172_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000122_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000142_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000029_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000046_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000090_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000013_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000124_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000061_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000023_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000139_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000015_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000033_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000074_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000145_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000031_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000168_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000161_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000069_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000025_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000167_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000072_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000125_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000007_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000042_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000104_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000115_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000098_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000047_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000080_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000137_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000119_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000088_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000004_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000016_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000012_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000156_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000039_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000101_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000111_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000010_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000059_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000110_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000005_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000121_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000057_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000079_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000123_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000112_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000091_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000127_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000093_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000038_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000045_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000017_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000043_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000021_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000051_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000103_000019_leftImg8bit.png
Cityscapes/images/val/munster/munster_000027_000019_leftImg8bit.png
Cityscapes/images/val/lindau/lindau_000012_000019_leftImg8bit.png
Cityscapes/images/val/lindau/lindau_000025_000019_leftImg8bit.png
Cityscapes/images/val/lindau/lindau_000052_000019_leftImg8bit.png
Cityscapes/images/val/lindau/lindau_000011_000019_leftImg8bit.png
Cityscapes/images/val/lindau/lindau_000055_000019_leftImg8bit.png
Cityscapes/images/val/lindau/lindau_000014_000019_leftImg8bit.png
Cityscapes/images/val/lindau/lindau_000037_000019_leftImg8bit.png
Cityscapes/images/val/lindau/lindau_000047_000019_leftImg8bit.png
Cityscapes/images/val/lindau/lindau_000057_000019_leftImg8bit.png
Cityscapes/images/val/lindau/lindau_000051_000019_leftImg8bit.png
Cityscapes/images/val/lindau/lindau_000042_000019_leftImg8bit.png
Cityscapes/images/val/lindau/lindau_000041_000019_leftImg8bit.png
Cityscapes/images/val/lindau/lindau_000020_000019_leftImg8bit.png
Cityscapes/images/val/lindau/lindau_000024_000019_leftImg8bit.png
Cityscapes/images/val/lindau/lindau_000035_000019_leftImg8bit.png
Cityscapes/images/val/lindau/lindau_000010_000019_leftImg8bit.png
Cityscapes/images/val/lindau/lindau_000046_000019_leftImg8bit.png
Cityscapes/images/val/lindau/lindau_000022_000019_leftImg8bit.png
Cityscapes/images/val/lindau/lindau_000053_000019_leftImg8bit.png
Cityscapes/images/val/lindau/lindau_000009_000019_leftImg8bit.png
Cityscapes/images/val/lindau/lindau_000013_000019_leftImg8bit.png
Cityscapes/images/val/lindau/lindau_000007_000019_leftImg8bit.png
Cityscapes/images/val/lindau/lindau_000038_000019_leftImg8bit.png
Cityscapes/images/val/lindau/lindau_000054_000019_leftImg8bit.png
Cityscapes/images/val/lindau/lindau_000005_000019_leftImg8bit.png
Cityscapes/images/val/lindau/lindau_000023_000019_leftImg8bit.png

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

23
data/mot16-01.test Normal file
View File

@ -0,0 +1,23 @@
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-01/img1/000041.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-01/img1/000201.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-01/img1/000221.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-01/img1/000061.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-01/img1/000021.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-01/img1/000261.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-01/img1/000241.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-01/img1/000001.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-01/img1/000421.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-01/img1/000401.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-01/img1/000381.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-01/img1/000181.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-01/img1/000441.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-01/img1/000161.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-01/img1/000321.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-01/img1/000301.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-01/img1/000141.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-01/img1/000101.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-01/img1/000341.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-01/img1/000361.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-01/img1/000121.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-01/img1/000281.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-01/img1/000081.jpg

52
data/mot16-03.test Normal file
View File

@ -0,0 +1,52 @@
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000391.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000811.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/001471.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/001021.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000061.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/001261.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000021.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000871.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000631.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000241.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/001411.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000781.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/001201.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000001.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000451.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000661.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000421.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/001441.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000211.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000991.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/001051.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000181.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000601.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/001381.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000011.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000841.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/001231.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000031.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000271.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000931.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000301.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000751.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000481.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000511.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/001291.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/001141.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000091.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000361.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000121.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000571.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/001321.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/001111.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000151.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/001081.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/001351.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000901.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000331.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000721.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/001171.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000961.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000541.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-03/img1/000691.jpg

38
data/mot16-14.test Normal file
View File

@ -0,0 +1,38 @@
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000201.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000041.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000581.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000221.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000061.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000021.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000241.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000261.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000001.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000661.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000421.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000401.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000381.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000641.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000601.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000181.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000441.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000461.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000621.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000161.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000321.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000301.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000481.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000141.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000681.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000101.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000341.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000361.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000121.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000741.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000501.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000281.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000521.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000721.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000561.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000541.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000081.jpg
/home/wangzd/datasets/MOT/MOT16/images/test/MOT16-14/img1/000701.jpg
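The mot16-*.test files added above are plain text lists with one absolute frame path per line (the frames are sampled at a fixed stride, every 20th frame here). As a rough illustration, such a list could be consumed as follows; the helper name and the example list path are hypothetical, not part of this repository.

```python
# Sketch: iterate over the frames named in a *.test list file (illustrative only).
import cv2


def load_frames(list_path):
    with open(list_path) as f:
        paths = [line.strip() for line in f if line.strip()]
    for path in paths:
        img = cv2.imread(path)  # BGR image array, or None if the file is missing
        if img is not None:
            yield path, img


# for path, img in load_frames('data/mot16-01.test'):
#     print(path, img.shape)
```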

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

128
demo.py
View File

@ -1,64 +1,64 @@
import os
import os.path as osp
import cv2
import logging
import argparse
import motmetrics as mm
from tracker.multitracker import JDETracker
from utils import visualization as vis
from utils.utils import *
from utils.io import read_results
from utils.log import logger
from utils.timer import Timer
from utils.evaluation import Evaluator
import utils.datasets as datasets
import torch
from track import eval_seq


def track(opt):
    logger.setLevel(logging.INFO)
    result_root = opt.output_root if opt.output_root != '' else '.'
    mkdir_if_missing(result_root)

    # run tracking
    timer = Timer()
    accs = []
    n_frame = 0

    logger.info('start tracking...')
    dataloader = datasets.LoadVideo(opt.input_video, opt.img_size)
    result_filename = os.path.join(result_root, 'results.txt')
    frame_rate = dataloader.frame_rate
    frame_dir = None if opt.output_format == 'text' else osp.join(result_root, 'frame')
    try:
        eval_seq(opt, dataloader, 'mot', result_filename,
                 save_dir=frame_dir, show_image=False, frame_rate=frame_rate)
    except Exception as e:
        logger.info(e)

    if opt.output_format == 'video':
        output_video_path = osp.join(result_root, 'result.mp4')
        cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(osp.join(result_root, 'frame'), output_video_path)
        os.system(cmd_str)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(prog='demo.py')
    parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')
    parser.add_argument('--weights', type=str, default='weights/latest.pt', help='path to weights file')
    parser.add_argument('--img-size', type=int, default=(1088, 608), help='size of each image dimension')
    parser.add_argument('--iou-thres', type=float, default=0.5, help='iou threshold required to qualify as detected')
    parser.add_argument('--conf-thres', type=float, default=0.5, help='object confidence threshold')
    parser.add_argument('--nms-thres', type=float, default=0.4, help='iou threshold for non-maximum suppression')
    parser.add_argument('--min-box-area', type=float, default=200, help='filter out tiny boxes')
    parser.add_argument('--track-buffer', type=int, default=30, help='tracking buffer')
    parser.add_argument('--input-video', type=str, help='path to the input video')
    parser.add_argument('--output-format', type=str, default='video', help='expected output format, can be video, or text')
    parser.add_argument('--output-root', type=str, default='results', help='expected output root path')
    opt = parser.parse_args()
    print(opt, end='\n\n')

    track(opt)
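Note on the suppression settings: demo.py above exposes --conf-thres and --nms-thres, and the detection script that follows filters raw predictions with pred[pred[:,:,4] > conf_thres] before calling non_max_suppression(pred.unsqueeze(0), conf_thres, nms_thres). For reference, below is a minimal sketch of such a helper built on torchvision.ops.nms; the function name, the (x1, y1, x2, y2, conf, ...) row layout, and the corner-format assumption are illustrative only and may differ from the actual implementation in utils.utils.

```python
# Minimal sketch of confidence filtering followed by torchvision NMS
# (layout and naming are assumptions, see the note above).
import torch
from torchvision.ops import nms


def non_max_suppression_sketch(prediction, conf_thres=0.5, nms_thres=0.4):
    """prediction: float Tensor of shape [batch, num_boxes, >= 5]."""
    output = [None] * prediction.shape[0]
    for i, pred in enumerate(prediction):
        pred = pred[pred[:, 4] > conf_thres]      # keep confident detections only
        if pred.shape[0] == 0:
            continue
        boxes, scores = pred[:, :4], pred[:, 4]   # (x1, y1, x2, y2) boxes and their scores
        keep = nms(boxes, scores, nms_thres)      # indices retained by torchvision's NMS
        output[i] = pred[keep]
    return output
```

torchvision.ops.nms returns the indices of the kept boxes sorted by decreasing score, so each output[i] ends up ordered by confidence.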

View File

@ -1,98 +1,98 @@
import argparse
import json
import time
from pathlib import Path
from sklearn import metrics
from scipy import interpolate
import torch.nn.functional as F
from models import *
from utils.utils import *
from torchvision.transforms import transforms as T
from utils.datasets import LoadImages, JointDataset, collate_fn
def extract_ped_per_frame(
cfg,
input_root,
output_root,
weights,
batch_size=16,
img_size=416,
iou_thres=0.5,
conf_thres=0.3,
nms_thres=0.45,
print_interval=40,
nID=14455,
):
mkdir_if_missing(output_root)
# Initialize model
model = Darknet(cfg, img_size, nID)
# Load weights
if weights.endswith('.pt'): # pytorch format
model.load_state_dict(torch.load(weights, map_location='cpu')['model'], strict=False)
else: # darknet format
load_darknet_weights(model, weights)
model = torch.nn.DataParallel(model)
model.cuda().eval()
vlist = os.listdir(input_root)
vlist = [osp.join(input_root, v, 'img1') for v in vlist]
for vpath in vlist:
vroot = osp.join('/',*vpath.split('/')[:-1])
out_vroot = vroot.replace(input_root, output_root)
mkdir_if_missing(out_vroot)
dataloader = LoadImages(vpath, img_size)
for frame_id, (frame_path, frame, frame_ori) in enumerate(dataloader):
frame_ground_id = frame_path.split('/')[-1].split('.')[0]
if frame_id % 20 == 0:
print('Processing frame {} of video {}'.format(frame_id, frame_path))
blob = torch.from_numpy(frame).cuda().unsqueeze(0)
pred = model(blob)
pred = pred[pred[:,:,4] > conf_thres]
if len(pred) > 0:
dets = non_max_suppression(pred.unsqueeze(0), conf_thres, nms_thres)[0].cpu()
scale_coords(img_size, dets[:, :4], frame_ori.shape).round()
frame_dir = osp.join(out_vroot, frame_ground_id)
mkdir_if_missing(frame_dir)
dets = dets[:, :5]
for ped_id, det in enumerate(dets):
box = det[:4].int()
conf = det[4]
ped = frame_ori[box[1]:box[3], box[0]:box[2]]
ped_path = osp.join(frame_dir, ('{:04d}_'+ '{:d}_'*4 + '{:.2f}.jpg').format(ped_id, *box, conf))
cv2.imwrite(ped_path, ped)
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='test.py')
parser.add_argument('--batch-size', type=int, default=40, help='size of each image batch')
parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')
parser.add_argument('--weights', type=str, default='weights/mot_64/latest.pt', help='path to weights file')
parser.add_argument('--iou-thres', type=float, default=0.3, help='iou threshold required to qualify as detected')
parser.add_argument('--conf-thres', type=float, default=0.3, help='object confidence threshold')
parser.add_argument('--nms-thres', type=float, default=0.3, help='iou threshold for non-maximum suppression')
parser.add_argument('--img-size', type=int, default=(1088, 608), help='size of each image dimension')
parser.add_argument('--print-interval', type=int, default=10, help='interval between progress prints')
parser.add_argument('--input-root', type=str, default='/home/wangzd/datasets/youtube/data/0004/frame', help='path to input frames')
parser.add_argument('--output-root', type=str, default='/home/wangzd/datasets/youtube/data/0004/ped_per_frame', help='path to output frames')
opt = parser.parse_args()
print(opt, end='\n\n')
with torch.no_grad():
extract_ped_per_frame(
opt.cfg,
opt.input_root,
opt.output_root,
opt.weights,
opt.batch_size,
opt.img_size,
opt.iou_thres,
opt.conf_thres,
opt.nms_thres,
opt.print_interval,
)
import argparse
import json
import time
from pathlib import Path
from sklearn import metrics
from scipy import interpolate
import torch.nn.functional as F
from models import *
from utils.utils import *
from torchvision.transforms import transforms as T
from utils.datasets import LoadImages, JointDataset, collate_fn
def extract_ped_per_frame(
cfg,
input_root,
output_root,
weights,
batch_size=16,
img_size=416,
iou_thres=0.5,
conf_thres=0.3,
nms_thres=0.45,
print_interval=40,
nID=14455,
):
mkdir_if_missing(output_root)
# Initialize model
model = Darknet(cfg, img_size, nID)
# Load weights
if weights.endswith('.pt'): # pytorch format
model.load_state_dict(torch.load(weights, map_location='cpu')['model'], strict=False)
else: # darknet format
load_darknet_weights(model, weights)
model = torch.nn.DataParallel(model)
model.cuda().eval()
vlist = os.listdir(input_root)
vlist = [osp.join(input_root, v, 'img1') for v in vlist]
for vpath in vlist:
vroot = osp.join('/',*vpath.split('/')[:-1])
out_vroot = vroot.replace(input_root, output_root)
mkdir_if_missing(out_vroot)
dataloader = LoadImages(vpath, img_size)
for frame_id, (frame_path, frame, frame_ori) in enumerate(dataloader):
frame_ground_id = frame_path.split('/')[-1].split('.')[0]
if frame_id % 20 == 0:
print('Processing frame {} of video {}'.format(frame_id, frame_path))
blob = torch.from_numpy(frame).cuda().unsqueeze(0)
pred = model(blob)
pred = pred[pred[:,:,4] > conf_thres]
if len(pred) > 0:
dets = non_max_suppression(pred.unsqueeze(0), conf_thres, nms_thres)[0].cpu()
scale_coords(img_size, dets[:, :4], frame_ori.shape).round()
frame_dir = osp.join(out_vroot, frame_ground_id)
mkdir_if_missing(frame_dir)
dets = dets[:, :5]
for ped_id, det in enumerate(dets):
box = det[:4].int()
conf = det[4]
ped = frame_ori[box[1]:box[3], box[0]:box[2]]
ped_path = osp.join(frame_dir, ('{:04d}_'+ '{:d}_'*4 + '{:.2f}.jpg').format(ped_id, *box, conf))
cv2.imwrite(ped_path, ped)
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='test.py')
parser.add_argument('--batch-size', type=int, default=40, help='size of each image batch')
parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')
parser.add_argument('--weights', type=str, default='weights/mot_64/latest.pt', help='path to weights file')
parser.add_argument('--iou-thres', type=float, default=0.3, help='iou threshold required to qualify as detected')
parser.add_argument('--conf-thres', type=float, default=0.3, help='object confidence threshold')
parser.add_argument('--nms-thres', type=float, default=0.3, help='iou threshold for non-maximum suppression')
parser.add_argument('--img-size', type=int, default=(1088, 608), help='size of each image dimension')
parser.add_argument('--print-interval', type=int, default=10, help='interval between progress prints')
parser.add_argument('--input-root', type=str, default='/home/wangzd/datasets/youtube/data/0004/frame', help='path to input frames')
parser.add_argument('--output-root', type=str, default='/home/wangzd/datasets/youtube/data/0004/ped_per_frame', help='path to output frames')
opt = parser.parse_args()
print(opt, end='\n\n')
with torch.no_grad():
extract_ped_per_frame(
opt.cfg,
opt.input_root,
opt.output_root,
opt.weights,
opt.batch_size,
opt.img_size,
opt.iou_thres,
opt.conf_thres,
opt.nms_thres,
opt.print_interval,
)
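For reference, the crop filename built above packs the pedestrian index, the four box corners, and the detection confidence into a single name. A quick check of that format string, with made-up values:

```python
# '{:04d}_' + '{:d}_'*4 + '{:.2f}.jpg' expands to '{:04d}_{:d}_{:d}_{:d}_{:d}_{:.2f}.jpg'
fmt = '{:04d}_' + '{:d}_' * 4 + '{:.2f}.jpg'
print(fmt.format(3, 120, 45, 210, 310, 0.87))  # -> 0003_120_45_210_310_0.87.jpg
```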

754
models.py
View File

@ -1,374 +1,380 @@
import os
from collections import defaultdict,OrderedDict
import torch.nn as nn
from utils.parse_config import *
from utils.utils import *
from utils.syncbn import SyncBN
import time
import math
batch_norm=SyncBN #nn.BatchNorm2d
def create_modules(module_defs):
"""
Constructs module list of layer blocks from module configuration in module_defs
"""
hyperparams = module_defs.pop(0)
output_filters = [int(hyperparams['channels'])]
module_list = nn.ModuleList()
yolo_layer_count = 0
for i, module_def in enumerate(module_defs):
modules = nn.Sequential()
if module_def['type'] == 'convolutional':
bn = int(module_def['batch_normalize'])
filters = int(module_def['filters'])
kernel_size = int(module_def['size'])
pad = (kernel_size - 1) // 2 if int(module_def['pad']) else 0
modules.add_module('conv_%d' % i, nn.Conv2d(in_channels=output_filters[-1],
out_channels=filters,
kernel_size=kernel_size,
stride=int(module_def['stride']),
padding=pad,
bias=not bn))
if bn:
modules.add_module('batch_norm_%d' % i, batch_norm(filters))
if module_def['activation'] == 'leaky':
modules.add_module('leaky_%d' % i, nn.LeakyReLU(0.1))
elif module_def['type'] == 'maxpool':
kernel_size = int(module_def['size'])
stride = int(module_def['stride'])
if kernel_size == 2 and stride == 1:
modules.add_module('_debug_padding_%d' % i, nn.ZeroPad2d((0, 1, 0, 1)))
maxpool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=int((kernel_size - 1) // 2))
modules.add_module('maxpool_%d' % i, maxpool)
elif module_def['type'] == 'upsample':
upsample = Upsample(scale_factor=int(module_def['stride']))
modules.add_module('upsample_%d' % i, upsample)
elif module_def['type'] == 'route':
layers = [int(x) for x in module_def['layers'].split(',')]
filters = sum([output_filters[i + 1 if i > 0 else i] for i in layers])
modules.add_module('route_%d' % i, EmptyLayer())
elif module_def['type'] == 'shortcut':
filters = output_filters[int(module_def['from'])]
modules.add_module('shortcut_%d' % i, EmptyLayer())
elif module_def['type'] == 'yolo':
anchor_idxs = [int(x) for x in module_def['mask'].split(',')]
# Extract anchors
anchors = [float(x) for x in module_def['anchors'].split(',')]
anchors = [(anchors[i], anchors[i + 1]) for i in range(0, len(anchors), 2)]
anchors = [anchors[i] for i in anchor_idxs]
nC = int(module_def['classes']) # number of classes
img_size = (int(hyperparams['width']),int(hyperparams['height']))
# Define detection layer
yolo_layer = YOLOLayer(anchors, nC, hyperparams['nID'], img_size, yolo_layer_count, cfg=hyperparams['cfg'])
modules.add_module('yolo_%d' % i, yolo_layer)
yolo_layer_count += 1
# Register module list and number of output filters
module_list.append(modules)
output_filters.append(filters)
return hyperparams, module_list
class EmptyLayer(nn.Module):
"""Placeholder for 'route' and 'shortcut' layers"""
def __init__(self):
super(EmptyLayer, self).__init__()
def forward(self, x):
return x
class Upsample(nn.Module):
# Custom Upsample layer (nn.Upsample gives deprecated warning message)
def __init__(self, scale_factor=1, mode='nearest'):
super(Upsample, self).__init__()
self.scale_factor = scale_factor
self.mode = mode
def forward(self, x):
return F.interpolate(x, scale_factor=self.scale_factor, mode=self.mode)
class YOLOLayer(nn.Module):
def __init__(self, anchors, nC, nID, img_size, yolo_layer, cfg):
super(YOLOLayer, self).__init__()
self.layer = yolo_layer
nA = len(anchors)
self.anchors = torch.FloatTensor(anchors)
self.nA = nA # number of anchors (3)
self.nC = nC # number of classes (80)
self.nID = nID # number of identities
self.img_size = 0
self.emb_dim = 512
self.SmoothL1Loss = nn.SmoothL1Loss()
self.SoftmaxLoss = nn.CrossEntropyLoss(ignore_index=-1)
self.CrossEntropyLoss = nn.CrossEntropyLoss()
self.IDLoss = nn.CrossEntropyLoss(ignore_index=-1)
self.s_c = nn.Parameter(-4.15*torch.ones(1)) # -4.15
self.s_r = nn.Parameter(-4.85*torch.ones(1)) # -4.85
self.s_id = nn.Parameter(-2.3*torch.ones(1)) # -2.3
self.emb_scale = math.sqrt(2) * math.log(self.nID-1)
def forward(self, p_cat, img_size, targets=None, classifier=None, test_emb=False):
p, p_emb = p_cat[:, :24, ...], p_cat[:, 24:, ...]
nB, nGh, nGw = p.shape[0], p.shape[-2], p.shape[-1]
if self.img_size != img_size:
create_grids(self, img_size, nGh, nGw)
if p.is_cuda:
self.grid_xy = self.grid_xy.cuda()
self.anchor_wh = self.anchor_wh.cuda()
p = p.view(nB, self.nA, self.nC + 5, nGh, nGw).permute(0, 1, 3, 4, 2).contiguous() # prediction
p_emb = p_emb.permute(0,2,3,1).contiguous()
p_box = p[..., :4]
p_conf = p[..., 4:6].permute(0, 4, 1, 2, 3) # Conf
# Training
if targets is not None:
if test_emb:
tconf, tbox, tids = build_targets_max(targets, self.anchor_vec.cuda(), self.nA, self.nC, nGh, nGw)
else:
tconf, tbox, tids = build_targets_thres(targets, self.anchor_vec.cuda(), self.nA, self.nC, nGh, nGw)
tconf, tbox, tids = tconf.cuda(), tbox.cuda(), tids.cuda()
mask = tconf > 0
# Compute losses
nT = sum([len(x) for x in targets]) # number of targets
nM = mask.sum().float() # number of anchors (assigned to targets)
nP = torch.ones_like(mask).sum().float()
if nM > 0:
lbox = self.SmoothL1Loss(p_box[mask], tbox[mask])
else:
FT = torch.cuda.FloatTensor if p_conf.is_cuda else torch.FloatTensor
lbox, lconf = FT([0]), FT([0])
lconf = self.SoftmaxLoss(p_conf, tconf)
lid = torch.Tensor(1).fill_(0).squeeze().cuda()
emb_mask,_ = mask.max(1)
# For convenience we use max(1) to decide the id, TODO: more reasonable strategy
tids,_ = tids.max(1)
tids = tids[emb_mask]
embedding = p_emb[emb_mask].contiguous()
embedding = self.emb_scale * F.normalize(embedding)
nI = emb_mask.sum().float()
if test_emb:
if np.prod(embedding.shape)==0 or np.prod(tids.shape) == 0:
return torch.zeros(0, self.emb_dim + 1).cuda()
emb_and_gt = torch.cat([embedding, tids.float()], dim=1)
return emb_and_gt
if len(embedding) > 1:
logits = classifier(embedding).contiguous()
lid = self.IDLoss(logits, tids.squeeze())
# Sum loss components
loss = torch.exp(-self.s_r)*lbox + torch.exp(-self.s_c)*lconf + torch.exp(-self.s_id)*lid + \
(self.s_r + self.s_c + self.s_id)
loss *= 0.5
return loss, loss.item(), lbox.item(), lconf.item(), lid.item(), nT
else:
p_conf = torch.softmax(p_conf, dim=1)[:,1,...].unsqueeze(-1)
p_emb = p_emb.unsqueeze(1).repeat(1,self.nA,1,1,1).contiguous()
p_cls = torch.zeros(nB,self.nA,nGh,nGw,1).cuda() # Temp
p = torch.cat([p_box, p_conf, p_cls, p_emb], dim=-1)
p[..., :4] = decode_delta_map(p[..., :4], self.anchor_vec.to(p))
p[..., :4] *= self.stride
return p.view(nB, -1, p.shape[-1])
class Darknet(nn.Module):
"""YOLOv3 object detection model"""
def __init__(self, cfg_path, img_size=(1088, 608), nID=1591, test_emb=False):
super(Darknet, self).__init__()
self.module_defs = parse_model_cfg(cfg_path)
self.module_defs[0]['cfg'] = cfg_path
self.module_defs[0]['nID'] = nID
self.hyperparams, self.module_list = create_modules(self.module_defs)
self.img_size = img_size
self.loss_names = ['loss', 'box', 'conf', 'id', 'nT']
self.losses = OrderedDict()
for ln in self.loss_names:
self.losses[ln] = 0
self.emb_dim = 512
self.classifier = nn.Linear(self.emb_dim, nID)
self.test_emb=test_emb
def forward(self, x, targets=None, targets_len=None):
self.losses = OrderedDict()
for ln in self.loss_names:
self.losses[ln] = 0
is_training = (targets is not None) and (not self.test_emb)
#img_size = x.shape[-1]
layer_outputs = []
output = []
for i, (module_def, module) in enumerate(zip(self.module_defs, self.module_list)):
mtype = module_def['type']
if mtype in ['convolutional', 'upsample', 'maxpool']:
x = module(x)
elif mtype == 'route':
layer_i = [int(x) for x in module_def['layers'].split(',')]
if len(layer_i) == 1:
x = layer_outputs[layer_i[0]]
else:
x = torch.cat([layer_outputs[i] for i in layer_i], 1)
elif mtype == 'shortcut':
layer_i = int(module_def['from'])
x = layer_outputs[-1] + layer_outputs[layer_i]
elif mtype == 'yolo':
if is_training: # get loss
targets = [targets[i][:int(l)] for i,l in enumerate(targets_len)]
x, *losses = module[0](x, self.img_size, targets, self.classifier)
for name, loss in zip(self.loss_names, losses):
self.losses[name] += loss
elif self.test_emb:
targets = [targets[i][:int(l)] for i,l in enumerate(targets_len)]
x = module[0](x, self.img_size, targets, self.classifier, self.test_emb)
else: # get detections
x = module[0](x, self.img_size)
output.append(x)
layer_outputs.append(x)
if is_training:
self.losses['nT'] /= 3
output = [o.squeeze() for o in output]
return sum(output), torch.Tensor(list(self.losses.values())).cuda()
elif self.test_emb:
return torch.cat(output, 0)
return torch.cat(output, 1)
def create_grids(self, img_size, nGh, nGw):
self.stride = img_size[0]/nGw
assert self.stride == img_size[1] / nGh
# build xy offsets
grid_x = torch.arange(nGw).repeat((nGh, 1)).view((1, 1, nGh, nGw)).float()
grid_y = torch.arange(nGh).repeat((nGw, 1)).transpose(0,1).view((1, 1, nGh, nGw)).float()
#grid_y = grid_x.permute(0, 1, 3, 2)
self.grid_xy = torch.stack((grid_x, grid_y), 4)
# build wh gains
self.anchor_vec = self.anchors / self.stride
self.anchor_wh = self.anchor_vec.view(1, self.nA, 1, 1, 2)
def load_darknet_weights(self, weights, cutoff=-1):
# Parses and loads the weights stored in 'weights'
# cutoff: save layers between 0 and cutoff (if cutoff = -1 all are saved)
weights_file = weights.split(os.sep)[-1]
# Try to download weights if not available locally
if not os.path.isfile(weights):
try:
os.system('wget https://pjreddie.com/media/files/' + weights_file + ' -O ' + weights)
except IOError:
print(weights + ' not found')
# Establish cutoffs
if weights_file == 'darknet53.conv.74':
cutoff = 75
elif weights_file == 'yolov3-tiny.conv.15':
cutoff = 15
# Open the weights file
fp = open(weights, 'rb')
header = np.fromfile(fp, dtype=np.int32, count=5) # First five are header values
# Needed to write header when saving weights
self.header_info = header
self.seen = header[3] # number of images seen during training
weights = np.fromfile(fp, dtype=np.float32) # The rest are weights
fp.close()
ptr = 0
for i, (module_def, module) in enumerate(zip(self.module_defs[:cutoff], self.module_list[:cutoff])):
if module_def['type'] == 'convolutional':
conv_layer = module[0]
if module_def['batch_normalize']:
# Load BN bias, weights, running mean and running variance
bn_layer = module[1]
num_b = bn_layer.bias.numel() # Number of biases
# Bias
bn_b = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.bias)
bn_layer.bias.data.copy_(bn_b)
ptr += num_b
# Weight
bn_w = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.weight)
bn_layer.weight.data.copy_(bn_w)
ptr += num_b
# Running Mean
bn_rm = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.running_mean)
bn_layer.running_mean.data.copy_(bn_rm)
ptr += num_b
# Running Var
bn_rv = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.running_var)
bn_layer.running_var.data.copy_(bn_rv)
ptr += num_b
else:
# Load conv. bias
num_b = conv_layer.bias.numel()
conv_b = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(conv_layer.bias)
conv_layer.bias.data.copy_(conv_b)
ptr += num_b
# Load conv. weights
num_w = conv_layer.weight.numel()
conv_w = torch.from_numpy(weights[ptr:ptr + num_w]).view_as(conv_layer.weight)
conv_layer.weight.data.copy_(conv_w)
ptr += num_w
"""
@:param path - path of the new weights file
@:param cutoff - save layers between 0 and cutoff (cutoff = -1 -> all are saved)
"""
def save_weights(self, path, cutoff=-1):
fp = open(path, 'wb')
self.header_info[3] = self.seen # number of images seen during training
self.header_info.tofile(fp)
# Iterate through layers
for i, (module_def, module) in enumerate(zip(self.module_defs[:cutoff], self.module_list[:cutoff])):
if module_def['type'] == 'convolutional':
conv_layer = module[0]
# If batch norm, save bn parameters first
if module_def['batch_normalize']:
bn_layer = module[1]
bn_layer.bias.data.cpu().numpy().tofile(fp)
bn_layer.weight.data.cpu().numpy().tofile(fp)
bn_layer.running_mean.data.cpu().numpy().tofile(fp)
bn_layer.running_var.data.cpu().numpy().tofile(fp)
# Save conv bias
else:
conv_layer.bias.data.cpu().numpy().tofile(fp)
# Save conv weights
conv_layer.weight.data.cpu().numpy().tofile(fp)
fp.close()
import os
from collections import defaultdict,OrderedDict
import torch.nn as nn
from utils.parse_config import *
from utils.utils import *
from utils.syncbn import SyncBN
import time
import math
batch_norm=SyncBN #nn.BatchNorm2d
#batch_norm=nn.BatchNorm2d
def create_modules(module_defs):
"""
Constructs module list of layer blocks from module configuration in module_defs
"""
hyperparams = module_defs.pop(0)
output_filters = [int(hyperparams['channels'])]
module_list = nn.ModuleList()
yolo_layer_count = 0
for i, module_def in enumerate(module_defs):
modules = nn.Sequential()
if module_def['type'] == 'convolutional':
bn = int(module_def['batch_normalize'])
filters = int(module_def['filters'])
kernel_size = int(module_def['size'])
pad = (kernel_size - 1) // 2 if int(module_def['pad']) else 0
modules.add_module('conv_%d' % i, nn.Conv2d(in_channels=output_filters[-1],
out_channels=filters,
kernel_size=kernel_size,
stride=int(module_def['stride']),
padding=pad,
bias=not bn))
if bn:
after_bn = batch_norm(filters)
modules.add_module('batch_norm_%d' % i, after_bn)
# BN is uniformly initialized by default in pytorch 1.0.1.
# In pytorch>1.2.0, BN weights are initialized with constant 1,
# but we find that the model converges faster with the uniform initialization.
nn.init.uniform_(after_bn.weight)
nn.init.zeros_(after_bn.bias)
if module_def['activation'] == 'leaky':
modules.add_module('leaky_%d' % i, nn.LeakyReLU(0.1))
elif module_def['type'] == 'maxpool':
kernel_size = int(module_def['size'])
stride = int(module_def['stride'])
if kernel_size == 2 and stride == 1:
modules.add_module('_debug_padding_%d' % i, nn.ZeroPad2d((0, 1, 0, 1)))
maxpool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=int((kernel_size - 1) // 2))
modules.add_module('maxpool_%d' % i, maxpool)
elif module_def['type'] == 'upsample':
upsample = Upsample(scale_factor=int(module_def['stride']))
modules.add_module('upsample_%d' % i, upsample)
elif module_def['type'] == 'route':
layers = [int(x) for x in module_def['layers'].split(',')]
filters = sum([output_filters[i + 1 if i > 0 else i] for i in layers])
modules.add_module('route_%d' % i, EmptyLayer())
elif module_def['type'] == 'shortcut':
filters = output_filters[int(module_def['from'])]
modules.add_module('shortcut_%d' % i, EmptyLayer())
elif module_def['type'] == 'yolo':
anchor_idxs = [int(x) for x in module_def['mask'].split(',')]
# Extract anchors
anchors = [float(x) for x in module_def['anchors'].split(',')]
anchors = [(anchors[i], anchors[i + 1]) for i in range(0, len(anchors), 2)]
anchors = [anchors[i] for i in anchor_idxs]
nC = int(module_def['classes']) # number of classes
img_size = (int(hyperparams['width']),int(hyperparams['height']))
# Define detection layer
yolo_layer = YOLOLayer(anchors, nC, hyperparams['nID'], img_size, yolo_layer_count, cfg=hyperparams['cfg'])
modules.add_module('yolo_%d' % i, yolo_layer)
yolo_layer_count += 1
# Register module list and number of output filters
module_list.append(modules)
output_filters.append(filters)
return hyperparams, module_list
class EmptyLayer(nn.Module):
"""Placeholder for 'route' and 'shortcut' layers"""
def __init__(self):
super(EmptyLayer, self).__init__()
def forward(self, x):
return x
class Upsample(nn.Module):
# Custom Upsample layer (nn.Upsample gives deprecated warning message)
def __init__(self, scale_factor=1, mode='nearest'):
super(Upsample, self).__init__()
self.scale_factor = scale_factor
self.mode = mode
def forward(self, x):
return F.interpolate(x, scale_factor=self.scale_factor, mode=self.mode)
class YOLOLayer(nn.Module):
def __init__(self, anchors, nC, nID, img_size, yolo_layer, cfg):
super(YOLOLayer, self).__init__()
self.layer = yolo_layer
nA = len(anchors)
self.anchors = torch.FloatTensor(anchors)
self.nA = nA # number of anchors (3)
self.nC = nC # number of classes (80)
self.nID = nID # number of identities
self.img_size = 0
self.emb_dim = 512
self.SmoothL1Loss = nn.SmoothL1Loss()
self.SoftmaxLoss = nn.CrossEntropyLoss(ignore_index=-1)
self.CrossEntropyLoss = nn.CrossEntropyLoss()
self.IDLoss = nn.CrossEntropyLoss(ignore_index=-1)
self.s_c = nn.Parameter(-4.15*torch.ones(1)) # -4.15
self.s_r = nn.Parameter(-4.85*torch.ones(1)) # -4.85
self.s_id = nn.Parameter(-2.3*torch.ones(1)) # -2.3
self.emb_scale = math.sqrt(2) * math.log(self.nID-1)
def forward(self, p_cat, img_size, targets=None, classifier=None, test_emb=False):
p, p_emb = p_cat[:, :24, ...], p_cat[:, 24:, ...]
nB, nGh, nGw = p.shape[0], p.shape[-2], p.shape[-1]
if self.img_size != img_size:
create_grids(self, img_size, nGh, nGw)
if p.is_cuda:
self.grid_xy = self.grid_xy.cuda()
self.anchor_wh = self.anchor_wh.cuda()
p = p.view(nB, self.nA, self.nC + 5, nGh, nGw).permute(0, 1, 3, 4, 2).contiguous() # prediction
p_emb = p_emb.permute(0,2,3,1).contiguous()
p_box = p[..., :4]
p_conf = p[..., 4:6].permute(0, 4, 1, 2, 3) # Conf
# Training
if targets is not None:
if test_emb:
tconf, tbox, tids = build_targets_max(targets, self.anchor_vec.cuda(), self.nA, self.nC, nGh, nGw)
else:
tconf, tbox, tids = build_targets_thres(targets, self.anchor_vec.cuda(), self.nA, self.nC, nGh, nGw)
tconf, tbox, tids = tconf.cuda(), tbox.cuda(), tids.cuda()
mask = tconf > 0
# Compute losses
nT = sum([len(x) for x in targets]) # number of targets
nM = mask.sum().float() # number of anchors (assigned to targets)
nP = torch.ones_like(mask).sum().float()
if nM > 0:
lbox = self.SmoothL1Loss(p_box[mask], tbox[mask])
else:
FT = torch.cuda.FloatTensor if p_conf.is_cuda else torch.FloatTensor
lbox, lconf = FT([0]), FT([0])
lconf = self.SoftmaxLoss(p_conf, tconf)
lid = torch.Tensor(1).fill_(0).squeeze().cuda()
emb_mask,_ = mask.max(1)
# For convenience we use max(1) to decide the id, TODO: more reasonable strategy
tids,_ = tids.max(1)
tids = tids[emb_mask]
embedding = p_emb[emb_mask].contiguous()
embedding = self.emb_scale * F.normalize(embedding)
nI = emb_mask.sum().float()
if test_emb:
if np.prod(embedding.shape)==0 or np.prod(tids.shape) == 0:
return torch.zeros(0, self.emb_dim + 1).cuda()
emb_and_gt = torch.cat([embedding, tids.float()], dim=1)
return emb_and_gt
if len(embedding) > 1:
logits = classifier(embedding).contiguous()
lid = self.IDLoss(logits, tids.squeeze())
# Sum loss components
loss = torch.exp(-self.s_r)*lbox + torch.exp(-self.s_c)*lconf + torch.exp(-self.s_id)*lid + \
(self.s_r + self.s_c + self.s_id)
loss *= 0.5
return loss, loss.item(), lbox.item(), lconf.item(), lid.item(), nT
else:
p_conf = torch.softmax(p_conf, dim=1)[:,1,...].unsqueeze(-1)
p_emb = p_emb.unsqueeze(1).repeat(1,self.nA,1,1,1).contiguous()
p_cls = torch.zeros(nB,self.nA,nGh,nGw,1).cuda() # Temp
p = torch.cat([p_box, p_conf, p_cls, p_emb], dim=-1)
p[..., :4] = decode_delta_map(p[..., :4], self.anchor_vec.to(p))
p[..., :4] *= self.stride
return p.view(nB, -1, p.shape[-1])
class Darknet(nn.Module):
"""YOLOv3 object detection model"""
def __init__(self, cfg_path, img_size=(1088, 608), nID=1591, test_emb=False):
super(Darknet, self).__init__()
self.module_defs = parse_model_cfg(cfg_path)
self.module_defs[0]['cfg'] = cfg_path
self.module_defs[0]['nID'] = nID
self.hyperparams, self.module_list = create_modules(self.module_defs)
self.img_size = img_size
self.loss_names = ['loss', 'box', 'conf', 'id', 'nT']
self.losses = OrderedDict()
for ln in self.loss_names:
self.losses[ln] = 0
self.emb_dim = 512
self.classifier = nn.Linear(self.emb_dim, nID)
self.test_emb=test_emb
def forward(self, x, targets=None, targets_len=None):
self.losses = OrderedDict()
for ln in self.loss_names:
self.losses[ln] = 0
is_training = (targets is not None) and (not self.test_emb)
#img_size = x.shape[-1]
layer_outputs = []
output = []
for i, (module_def, module) in enumerate(zip(self.module_defs, self.module_list)):
mtype = module_def['type']
if mtype in ['convolutional', 'upsample', 'maxpool']:
x = module(x)
elif mtype == 'route':
layer_i = [int(x) for x in module_def['layers'].split(',')]
if len(layer_i) == 1:
x = layer_outputs[layer_i[0]]
else:
x = torch.cat([layer_outputs[i] for i in layer_i], 1)
elif mtype == 'shortcut':
layer_i = int(module_def['from'])
x = layer_outputs[-1] + layer_outputs[layer_i]
elif mtype == 'yolo':
if is_training: # get loss
targets = [targets[i][:int(l)] for i,l in enumerate(targets_len)]
x, *losses = module[0](x, self.img_size, targets, self.classifier)
for name, loss in zip(self.loss_names, losses):
self.losses[name] += loss
elif self.test_emb:
targets = [targets[i][:int(l)] for i,l in enumerate(targets_len)]
x = module[0](x, self.img_size, targets, self.classifier, self.test_emb)
else: # get detections
x = module[0](x, self.img_size)
output.append(x)
layer_outputs.append(x)
if is_training:
self.losses['nT'] /= 3
output = [o.squeeze() for o in output]
return sum(output), torch.Tensor(list(self.losses.values())).cuda()
elif self.test_emb:
return torch.cat(output, 0)
return torch.cat(output, 1)
def create_grids(self, img_size, nGh, nGw):
self.stride = img_size[0]/nGw
assert self.stride == img_size[1] / nGh
# build xy offsets
grid_x = torch.arange(nGw).repeat((nGh, 1)).view((1, 1, nGh, nGw)).float()
grid_y = torch.arange(nGh).repeat((nGw, 1)).transpose(0,1).view((1, 1, nGh, nGw)).float()
#grid_y = grid_x.permute(0, 1, 3, 2)
self.grid_xy = torch.stack((grid_x, grid_y), 4)
# build wh gains
self.anchor_vec = self.anchors / self.stride
self.anchor_wh = self.anchor_vec.view(1, self.nA, 1, 1, 2)
def load_darknet_weights(self, weights, cutoff=-1):
# Parses and loads the weights stored in 'weights'
# cutoff: save layers between 0 and cutoff (if cutoff = -1 all are saved)
weights_file = weights.split(os.sep)[-1]
# Try to download weights if not available locally
if not os.path.isfile(weights):
try:
os.system('wget https://pjreddie.com/media/files/' + weights_file + ' -O ' + weights)
except IOError:
print(weights + ' not found')
# Establish cutoffs
if weights_file == 'darknet53.conv.74':
cutoff = 75
elif weights_file == 'yolov3-tiny.conv.15':
cutoff = 15
# Open the weights file
fp = open(weights, 'rb')
header = np.fromfile(fp, dtype=np.int32, count=5) # First five are header values
# Needed to write header when saving weights
self.header_info = header
self.seen = header[3] # number of images seen during training
weights = np.fromfile(fp, dtype=np.float32) # The rest are weights
fp.close()
ptr = 0
for i, (module_def, module) in enumerate(zip(self.module_defs[:cutoff], self.module_list[:cutoff])):
if module_def['type'] == 'convolutional':
conv_layer = module[0]
if module_def['batch_normalize']:
# Load BN bias, weights, running mean and running variance
bn_layer = module[1]
num_b = bn_layer.bias.numel() # Number of biases
# Bias
bn_b = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.bias)
bn_layer.bias.data.copy_(bn_b)
ptr += num_b
# Weight
bn_w = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.weight)
bn_layer.weight.data.copy_(bn_w)
ptr += num_b
# Running Mean
bn_rm = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.running_mean)
bn_layer.running_mean.data.copy_(bn_rm)
ptr += num_b
# Running Var
bn_rv = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.running_var)
bn_layer.running_var.data.copy_(bn_rv)
ptr += num_b
else:
# Load conv. bias
num_b = conv_layer.bias.numel()
conv_b = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(conv_layer.bias)
conv_layer.bias.data.copy_(conv_b)
ptr += num_b
# Load conv. weights
num_w = conv_layer.weight.numel()
conv_w = torch.from_numpy(weights[ptr:ptr + num_w]).view_as(conv_layer.weight)
conv_layer.weight.data.copy_(conv_w)
ptr += num_w
"""
@:param path - path of the new weights file
@:param cutoff - save layers between 0 and cutoff (cutoff = -1 -> all are saved)
"""
def save_weights(self, path, cutoff=-1):
fp = open(path, 'wb')
self.header_info[3] = self.seen # number of images seen during training
self.header_info.tofile(fp)
# Iterate through layers
for i, (module_def, module) in enumerate(zip(self.module_defs[:cutoff], self.module_list[:cutoff])):
if module_def['type'] == 'convolutional':
conv_layer = module[0]
# If batch norm, save bn parameters first
if module_def['batch_normalize']:
bn_layer = module[1]
bn_layer.bias.data.cpu().numpy().tofile(fp)
bn_layer.weight.data.cpu().numpy().tofile(fp)
bn_layer.running_mean.data.cpu().numpy().tofile(fp)
bn_layer.running_var.data.cpu().numpy().tofile(fp)
# Save conv bias
else:
conv_layer.bias.data.cpu().numpy().tofile(fp)
# Save conv weights
conv_layer.weight.data.cpu().numpy().tofile(fp)
fp.close()
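The total loss returned by `YOLOLayer.forward` mixes the box, confidence, and identity terms through the learnable scalars `s_r`, `s_c`, `s_id`, a task-uncertainty style weighting. The sketch below isolates just that combination for clarity; it is an illustration of the formula above, not code from this repository.

```python
import torch
import torch.nn as nn

class TaskUncertaintyWeighting(nn.Module):
    """Combine per-task losses with learnable log-variance weights, as in YOLOLayer."""
    def __init__(self):
        super().__init__()
        # Initial values mirror s_c, s_r, s_id in YOLOLayer above.
        self.s_c = nn.Parameter(-4.15 * torch.ones(1))
        self.s_r = nn.Parameter(-4.85 * torch.ones(1))
        self.s_id = nn.Parameter(-2.3 * torch.ones(1))

    def forward(self, lbox, lconf, lid):
        # exp(-s) scales each loss; the raw s terms act as a regularizer so the
        # learned weights cannot shrink all losses to zero.
        loss = (torch.exp(-self.s_r) * lbox
                + torch.exp(-self.s_c) * lconf
                + torch.exp(-self.s_id) * lid
                + (self.s_r + self.s_c + self.s_id))
        return 0.5 * loss
```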

75
setup.py Normal file
View File

@ -0,0 +1,75 @@
###################################################################
# File Name: setup.py
# Author: Zhongdao Wang
# mail: wcd17@mails.tsinghua.edu.cn
# Created Time: Thu 19 Dec 2019 07:29:02 PM CST
###################################################################
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import os
import glob
import torch
from setuptools import find_packages
from setuptools import setup
from torch.utils.cpp_extension import CUDA_HOME
from torch.utils.cpp_extension import CppExtension
from torch.utils.cpp_extension import CUDAExtension
def get_extensions():
this_dir = os.path.dirname(os.path.abspath(__file__))
extensions_dir = os.path.join(this_dir, "utils", "nms")
main_file = glob.glob(os.path.join(extensions_dir, "*.cpp"))
source_cpu = glob.glob(os.path.join(extensions_dir, "*.cpp"))
source_cuda = glob.glob(os.path.join(extensions_dir, "*.cu"))
sources = main_file
extension = CppExtension
extra_compile_args = {"cxx": []}
define_macros = []
#if (torch.cuda.is_available() and CUDA_HOME is not None) or os.getenv("FORCE_CUDA", "0") == "1":
if False:
extension = CUDAExtension
sources += source_cuda
define_macros += [("WITH_CUDA", None)]
extra_compile_args["nvcc"] = [
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
]
sources = [os.path.join(extensions_dir, s) for s in sources]
include_dirs = [extensions_dir]
ext_modules = [
extension(
"nms",
sources,
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=extra_compile_args,
)
]
return ext_modules
print(get_extensions())
setup(
name="nms",
version="0.1",
author="fmassa",
url="https://github.com/facebookresearch/maskrcnn-benchmark",
description="GPU supported NMS",
ext_modules=get_extensions(),
cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
)
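This setup script builds the standalone `nms` extension borrowed from maskrcnn-benchmark (note that the CUDA branch is disabled via `if False:`). Since torchvision 0.3 the same operator ships as `torchvision.ops.nms`, which is the direction this commit moves in. A minimal sketch, assuming torchvision >= 0.3 and boxes in `(x1, y1, x2, y2)` format:

```python
import torch
from torchvision.ops import nms

boxes = torch.tensor([[ 0.,  0., 10., 10.],
                      [ 1.,  1., 11., 11.],
                      [50., 50., 60., 60.]])
scores = torch.tensor([0.9, 0.8, 0.7])

# Indices of boxes kept after suppressing overlaps with IoU above the threshold.
keep = nms(boxes, scores, iou_threshold=0.4)
print(keep)  # tensor([0, 2])
```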

530
test.py
View File

@ -1,265 +1,265 @@
import argparse
import json
import time
from pathlib import Path
from sklearn import metrics
from scipy import interpolate
import torch.nn.functional as F
from models import *
from utils.utils import *
from torchvision.transforms import transforms as T
from utils.datasets import LoadImagesAndLabels, JointDataset, collate_fn
def test(
cfg,
data_cfg,
weights,
batch_size=16,
img_size=416,
iou_thres=0.5,
conf_thres=0.3,
nms_thres=0.45,
print_interval=40,
nID=14455,
):
# Configure run
f = open(data_cfg)
data_cfg_dict = json.load(f)
f.close()
#nC = int(data_cfg_dict['classes']) # number of classes (80 for COCO)
nC = 1
test_path = data_cfg_dict['test']
dataset_root = data_cfg_dict['root']
# Initialize model
model = Darknet(cfg, img_size, nID)
# Load weights
if weights.endswith('.pt'): # pytorch format
model.load_state_dict(torch.load(weights, map_location='cpu')['model'], strict=False)
else: # darknet format
load_darknet_weights(model, weights)
model = torch.nn.DataParallel(model)
model.cuda().eval()
# Get dataloader
transforms = T.Compose([T.ToTensor()])
dataset = JointDataset(dataset_root, test_path, img_size, augment=False, transforms=transforms)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False,
num_workers=8, drop_last=False, collate_fn=collate_fn)
mean_mAP, mean_R, mean_P, seen = 0.0, 0.0, 0.0, 0
print('%11s' * 5 % ('Image', 'Total', 'P', 'R', 'mAP'))
outputs, mAPs, mR, mP, TP, confidence, pred_class, target_class, jdict = \
[], [], [], [], [], [], [], [], []
AP_accum, AP_accum_count = np.zeros(nC), np.zeros(nC)
for batch_i, (imgs, targets, paths, shapes, targets_len) in enumerate(dataloader):
t = time.time()
output = model(imgs.cuda())
output = non_max_suppression(output, conf_thres=conf_thres, nms_thres=nms_thres)
for i, o in enumerate(output):
if o is not None:
output[i] = o[:, :6]
# Compute average precision for each sample
targets = [targets[i][:int(l)] for i,l in enumerate(targets_len)]
for si, (labels, detections) in enumerate(zip(targets, output)):
seen += 1
if detections is None:
# If there are labels but no detections mark as zero AP
if labels.size(0) != 0:
mAPs.append(0), mR.append(0), mP.append(0)
continue
# Get detections sorted by decreasing confidence scores
detections = detections.cpu().numpy()
detections = detections[np.argsort(-detections[:, 4])]
# If no labels add number of detections as incorrect
correct = []
if labels.size(0) == 0:
# correct.extend([0 for _ in range(len(detections))])
mAPs.append(0), mR.append(0), mP.append(0)
continue
else:
target_cls = labels[:, 0]
# Extract target boxes as (x1, y1, x2, y2)
target_boxes = xywh2xyxy(labels[:, 2:6])
target_boxes[:, 0] *= img_size[0]
target_boxes[:, 2] *= img_size[0]
target_boxes[:, 1] *= img_size[1]
target_boxes[:, 3] *= img_size[1]
detected = []
for *pred_bbox, conf, obj_conf in detections:
obj_pred = 0
pred_bbox = torch.FloatTensor(pred_bbox).view(1, -1)
# Compute iou with target boxes
iou = bbox_iou(pred_bbox, target_boxes, x1y1x2y2=True)[0]
# Extract index of largest overlap
best_i = np.argmax(iou)
# If overlap exceeds threshold and classification is correct mark as correct
if iou[best_i] > iou_thres and obj_pred == labels[best_i, 0] and best_i not in detected:
correct.append(1)
detected.append(best_i)
else:
correct.append(0)
# Compute Average Precision (AP) per class
AP, AP_class, R, P = ap_per_class(tp=correct,
conf=detections[:, 4],
pred_cls=np.zeros_like(detections[:, 5]), # detections[:, 6]
target_cls=target_cls)
# Accumulate AP per class
AP_accum_count += np.bincount(AP_class, minlength=nC)
AP_accum += np.bincount(AP_class, minlength=nC, weights=AP)
# Compute mean AP across all classes in this image, and append to image list
mAPs.append(AP.mean())
mR.append(R.mean())
mP.append(P.mean())
# Means of all images
mean_mAP = np.sum(mAPs) / ( AP_accum_count + 1E-16)
mean_R = np.sum(mR) / ( AP_accum_count + 1E-16)
mean_P = np.sum(mP) / (AP_accum_count + 1E-16)
if batch_i % print_interval==0:
# Print image mAP and running mean mAP
print(('%11s%11s' + '%11.3g' * 4 + 's') %
(seen, dataloader.dataset.nF, mean_P, mean_R, mean_mAP, time.time() - t))
# Print mAP per class
print('%11s' * 5 % ('Image', 'Total', 'P', 'R', 'mAP'))
print('AP: %-.4f\n\n' % (AP_accum[0] / (AP_accum_count[0] + 1E-16)))
# Return mAP
return mean_mAP, mean_R, mean_P
def test_emb(
cfg,
data_cfg,
weights,
batch_size=16,
img_size=416,
iou_thres=0.5,
conf_thres=0.3,
nms_thres=0.45,
print_interval=40,
nID=14455,
):
# Configure run
f = open(data_cfg)
data_cfg_dict = json.load(f)
f.close()
test_paths = data_cfg_dict['test_emb']
dataset_root = data_cfg_dict['root']
# Initialize model
model = Darknet(cfg, img_size, nID, test_emb=True)
# Load weights
if weights.endswith('.pt'): # pytorch format
model.load_state_dict(torch.load(weights, map_location='cpu')['model'], strict=False)
else: # darknet format
load_darknet_weights(model, weights)
model = torch.nn.DataParallel(model)
model.cuda().eval()
# Get dataloader
transforms = T.Compose([T.ToTensor()])
dataset = JointDataset(dataset_root, test_paths, img_size, augment=False, transforms=transforms)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False,
num_workers=8, drop_last=False, collate_fn=collate_fn)
embedding, id_labels = [], []
print('Extracting pedestrian features...')
for batch_i, (imgs, targets, paths, shapes, targets_len) in enumerate(dataloader):
t = time.time()
output = model(imgs.cuda(), targets.cuda(), targets_len.cuda()).squeeze()
for out in output:
feat, label = out[:-1], out[-1].long()
if label != -1:
embedding.append(feat)
id_labels.append(label)
if batch_i % print_interval==0:
print('Extracting {}/{}, # of instances {}, time {:.2f} sec.'.format(batch_i, len(dataloader), len(id_labels), time.time() - t))
print('Computing pairwise similarity...')
if len(embedding) < 1:
return None
embedding = torch.stack(embedding, dim=0).cuda()
id_labels = torch.LongTensor(id_labels)
n = len(id_labels)
print(n, len(embedding))
assert len(embedding) == n
embedding = F.normalize(embedding, dim=1)
pdist = torch.mm(embedding, embedding.t()).cpu().numpy()
gt = id_labels.expand(n,n).eq(id_labels.expand(n,n).t()).numpy()
up_triangle = np.where(np.triu(pdist)- np.eye(n)*pdist !=0)
pdist = pdist[up_triangle]
gt = gt[up_triangle]
far_levels = [ 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1]
far,tar,threshold = metrics.roc_curve(gt, pdist)
interp = interpolate.interp1d(far, tar)
tar_at_far = [interp(x) for x in far_levels]
for f,fa in enumerate(far_levels):
print('TPR@FAR={:.7f}: {:.4f}'.format(fa, tar_at_far[f]))
return tar_at_far
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='test.py')
parser.add_argument('--batch-size', type=int, default=40, help='size of each image batch')
parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')
parser.add_argument('--data-cfg', type=str, default='cfg/ccmcpe.json', help='data config')
parser.add_argument('--weights', type=str, default='weights/latest.pt', help='path to weights file')
parser.add_argument('--iou-thres', type=float, default=0.5, help='iou threshold required to qualify as detected')
parser.add_argument('--conf-thres', type=float, default=0.3, help='object confidence threshold')
parser.add_argument('--nms-thres', type=float, default=0.5, help='iou threshold for non-maximum suppression')
parser.add_argument('--img-size', type=int, default=(1088, 608), help='size of each image dimension')
parser.add_argument('--print-interval', type=int, default=10, help='interval between progress prints')
parser.add_argument('--test-emb', action='store_true', help='test embedding')
opt = parser.parse_args()
print(opt, end='\n\n')
with torch.no_grad():
if opt.test_emb:
res = test_emb(
opt.cfg,
opt.data_cfg,
opt.weights,
opt.batch_size,
opt.img_size,
opt.iou_thres,
opt.conf_thres,
opt.nms_thres,
opt.print_interval,
)
else:
mAP = test(
opt.cfg,
opt.data_cfg,
opt.weights,
opt.batch_size,
opt.img_size,
opt.iou_thres,
opt.conf_thres,
opt.nms_thres,
opt.print_interval,
)
import argparse
import json
import time
from pathlib import Path
from sklearn import metrics
from scipy import interpolate
import torch.nn.functional as F
from models import *
from utils.utils import *
from torchvision.transforms import transforms as T
from utils.datasets import LoadImagesAndLabels, JointDataset, collate_fn
def test(
cfg,
data_cfg,
weights,
batch_size=16,
img_size=416,
iou_thres=0.5,
conf_thres=0.3,
nms_thres=0.45,
print_interval=40,
nID=14455,
):
# Configure run
f = open(data_cfg)
data_cfg_dict = json.load(f)
f.close()
#nC = int(data_cfg_dict['classes']) # number of classes (80 for COCO)
nC = 1
test_path = data_cfg_dict['test']
dataset_root = data_cfg_dict['root']
# Initialize model
model = Darknet(cfg, img_size, nID)
# Load weights
if weights.endswith('.pt'): # pytorch format
model.load_state_dict(torch.load(weights, map_location='cpu')['model'], strict=False)
else: # darknet format
load_darknet_weights(model, weights)
model = torch.nn.DataParallel(model)
model.cuda().eval()
# Get dataloader
transforms = T.Compose([T.ToTensor()])
dataset = JointDataset(dataset_root, test_path, img_size, augment=False, transforms=transforms)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False,
num_workers=8, drop_last=False, collate_fn=collate_fn)
mean_mAP, mean_R, mean_P, seen = 0.0, 0.0, 0.0, 0
print('%11s' * 5 % ('Image', 'Total', 'P', 'R', 'mAP'))
outputs, mAPs, mR, mP, TP, confidence, pred_class, target_class, jdict = \
[], [], [], [], [], [], [], [], []
AP_accum, AP_accum_count = np.zeros(nC), np.zeros(nC)
for batch_i, (imgs, targets, paths, shapes, targets_len) in enumerate(dataloader):
t = time.time()
output = model(imgs.cuda())
output = non_max_suppression(output, conf_thres=conf_thres, nms_thres=nms_thres)
for i, o in enumerate(output):
if o is not None:
output[i] = o[:, :6]
# Compute average precision for each sample
targets = [targets[i][:int(l)] for i,l in enumerate(targets_len)]
for si, (labels, detections) in enumerate(zip(targets, output)):
seen += 1
if detections is None:
# If there are labels but no detections mark as zero AP
if labels.size(0) != 0:
mAPs.append(0), mR.append(0), mP.append(0)
continue
# Get detections sorted by decreasing confidence scores
detections = detections.cpu().numpy()
detections = detections[np.argsort(-detections[:, 4])]
# If no labels add number of detections as incorrect
correct = []
if labels.size(0) == 0:
# correct.extend([0 for _ in range(len(detections))])
mAPs.append(0), mR.append(0), mP.append(0)
continue
else:
target_cls = labels[:, 0]
# Extract target boxes as (x1, y1, x2, y2)
target_boxes = xywh2xyxy(labels[:, 2:6])
target_boxes[:, 0] *= img_size[0]
target_boxes[:, 2] *= img_size[0]
target_boxes[:, 1] *= img_size[1]
target_boxes[:, 3] *= img_size[1]
detected = []
for *pred_bbox, conf, obj_conf in detections:
obj_pred = 0
pred_bbox = torch.FloatTensor(pred_bbox).view(1, -1)
# Compute iou with target boxes
iou = bbox_iou(pred_bbox, target_boxes, x1y1x2y2=True)[0]
# Extract index of largest overlap
best_i = np.argmax(iou)
# If overlap exceeds threshold and classification is correct mark as correct
if iou[best_i] > iou_thres and obj_pred == labels[best_i, 0] and best_i not in detected:
correct.append(1)
detected.append(best_i)
else:
correct.append(0)
# Compute Average Precision (AP) per class
AP, AP_class, R, P = ap_per_class(tp=correct,
conf=detections[:, 4],
pred_cls=np.zeros_like(detections[:, 5]), # detections[:, 6]
target_cls=target_cls)
# Accumulate AP per class
AP_accum_count += np.bincount(AP_class, minlength=nC)
AP_accum += np.bincount(AP_class, minlength=nC, weights=AP)
# Compute mean AP across all classes in this image, and append to image list
mAPs.append(AP.mean())
mR.append(R.mean())
mP.append(P.mean())
# Means of all images
mean_mAP = np.sum(mAPs) / ( AP_accum_count + 1E-16)
mean_R = np.sum(mR) / ( AP_accum_count + 1E-16)
mean_P = np.sum(mP) / (AP_accum_count + 1E-16)
if batch_i % print_interval==0:
# Print image mAP and running mean mAP
print(('%11s%11s' + '%11.3g' * 4 + 's') %
(seen, dataloader.dataset.nF, mean_P, mean_R, mean_mAP, time.time() - t))
# Print mAP per class
print('%11s' * 5 % ('Image', 'Total', 'P', 'R', 'mAP'))
print('AP: %-.4f\n\n' % (AP_accum[0] / (AP_accum_count[0] + 1E-16)))
# Return mAP
return mean_mAP, mean_R, mean_P
def test_emb(
cfg,
data_cfg,
weights,
batch_size=16,
img_size=416,
iou_thres=0.5,
conf_thres=0.3,
nms_thres=0.45,
print_interval=40,
nID=14455,
):
# Configure run
f = open(data_cfg)
data_cfg_dict = json.load(f)
f.close()
test_paths = data_cfg_dict['test_emb']
dataset_root = data_cfg_dict['root']
# Initialize model
model = Darknet(cfg, img_size, nID, test_emb=True)
# Load weights
if weights.endswith('.pt'): # pytorch format
model.load_state_dict(torch.load(weights, map_location='cpu')['model'], strict=False)
else: # darknet format
load_darknet_weights(model, weights)
model = torch.nn.DataParallel(model)
model.cuda().eval()
# Get dataloader
transforms = T.Compose([T.ToTensor()])
dataset = JointDataset(dataset_root, test_paths, img_size, augment=False, transforms=transforms)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False,
num_workers=8, drop_last=False, collate_fn=collate_fn)
embedding, id_labels = [], []
print('Extracting pedestrian features...')
for batch_i, (imgs, targets, paths, shapes, targets_len) in enumerate(dataloader):
t = time.time()
output = model(imgs.cuda(), targets.cuda(), targets_len.cuda()).squeeze()
for out in output:
feat, label = out[:-1], out[-1].long()
if label != -1:
embedding.append(feat)
id_labels.append(label)
if batch_i % print_interval==0:
print('Extracting {}/{}, # of instances {}, time {:.2f} sec.'.format(batch_i, len(dataloader), len(id_labels), time.time() - t))
print('Computing pairwise similarity...')
if len(embedding) < 1:
return None
embedding = torch.stack(embedding, dim=0).cuda()
id_labels = torch.LongTensor(id_labels)
n = len(id_labels)
print(n, len(embedding))
assert len(embedding) == n
embedding = F.normalize(embedding, dim=1)
pdist = torch.mm(embedding, embedding.t()).cpu().numpy()
gt = id_labels.expand(n,n).eq(id_labels.expand(n,n).t()).numpy()
up_triangle = np.where(np.triu(pdist)- np.eye(n)*pdist !=0)
pdist = pdist[up_triangle]
gt = gt[up_triangle]
far_levels = [ 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1]
far,tar,threshold = metrics.roc_curve(gt, pdist)
interp = interpolate.interp1d(far, tar)
tar_at_far = [interp(x) for x in far_levels]
for f,fa in enumerate(far_levels):
print('TPR@FAR={:.7f}: {:.4f}'.format(fa, tar_at_far[f]))
return tar_at_far
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='test.py')
parser.add_argument('--batch-size', type=int, default=40, help='size of each image batch')
parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')
parser.add_argument('--data-cfg', type=str, default='cfg/ccmcpe.json', help='data config')
parser.add_argument('--weights', type=str, default='weights/latest.pt', help='path to weights file')
parser.add_argument('--iou-thres', type=float, default=0.5, help='iou threshold required to qualify as detected')
parser.add_argument('--conf-thres', type=float, default=0.3, help='object confidence threshold')
parser.add_argument('--nms-thres', type=float, default=0.5, help='iou threshold for non-maximum suppression')
parser.add_argument('--img-size', type=int, default=(1088, 608), help='size of each image dimension')
parser.add_argument('--print-interval', type=int, default=10, help='interval between progress prints')
parser.add_argument('--test-emb', action='store_true', help='test embedding')
opt = parser.parse_args()
print(opt, end='\n\n')
with torch.no_grad():
if opt.test_emb:
res = test_emb(
opt.cfg,
opt.data_cfg,
opt.weights,
opt.batch_size,
opt.img_size,
opt.iou_thres,
opt.conf_thres,
opt.nms_thres,
opt.print_interval,
)
else:
mAP = test(
opt.cfg,
opt.data_cfg,
opt.weights,
opt.batch_size,
opt.img_size,
opt.iou_thres,
opt.conf_thres,
opt.nms_thres,
opt.print_interval,
)
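`test_emb` scores the embedding head by ranking cosine similarities of all pairs and reporting TPR at fixed false-accept rates. A self-contained illustration of that metric on random stand-in data; it uses `np.triu_indices` instead of the mask construction above, but computes the same quantity:

```python
import numpy as np
import torch
import torch.nn.functional as F
from sklearn import metrics
from scipy import interpolate

n = 200
emb = F.normalize(torch.randn(n, 512), dim=1)    # stand-in embeddings
ids = torch.randint(0, 20, (n,))                 # stand-in identity labels

pdist = torch.mm(emb, emb.t()).numpy()           # pairwise cosine similarity
gt = ids.expand(n, n).eq(ids.expand(n, n).t()).numpy()

iu = np.triu_indices(n, k=1)                     # unique pairs, no self-matches
scores, labels = pdist[iu], gt[iu]

far, tar, _ = metrics.roc_curve(labels, scores)  # FAR = FPR, TAR = TPR
interp = interpolate.interp1d(far, tar)
for level in [1e-3, 1e-2, 1e-1]:
    print('TPR@FAR={:.7f}: {:.4f}'.format(level, float(interp(level))))
```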

359
track.py
View File

@ -1,175 +1,184 @@
import os
import os.path as osp
import cv2
import logging
import argparse
import motmetrics as mm
from tracker.multitracker import JDETracker
from utils import visualization as vis
from utils.log import logger
from utils.timer import Timer
from utils.evaluation import Evaluator
import utils.datasets as datasets
import torch
from utils.utils import *
def write_results(filename, results, data_type):
if data_type == 'mot':
save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n'
elif data_type == 'kitti':
save_format = '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n'
else:
raise ValueError(data_type)
with open(filename, 'w') as f:
for frame_id, tlwhs, track_ids in results:
if data_type == 'kitti':
frame_id -= 1
for tlwh, track_id in zip(tlwhs, track_ids):
if track_id < 0:
continue
x1, y1, w, h = tlwh
x2, y2 = x1 + w, y1 + h
line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h)
f.write(line)
logger.info('save results to {}'.format(filename))
def eval_seq(opt, dataloader, data_type, result_filename, save_dir=None, show_image=True, frame_rate=30):
if save_dir:
mkdir_if_missing(save_dir)
tracker = JDETracker(opt, frame_rate=frame_rate)
timer = Timer()
results = []
frame_id = 0
for path, img, img0 in dataloader:
if frame_id % 20 == 0:
logger.info('Processing frame {} ({:.2f} fps)'.format(frame_id, 1./max(1e-5, timer.average_time)))
# run tracking
timer.tic()
blob = torch.from_numpy(img).cuda().unsqueeze(0)
online_targets = tracker.update(blob, img0)
online_tlwhs = []
online_ids = []
for t in online_targets:
tlwh = t.tlwh
tid = t.track_id
vertical = tlwh[2] / tlwh[3] > 1.6
if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
online_tlwhs.append(tlwh)
online_ids.append(tid)
timer.toc()
# save results
results.append((frame_id + 1, online_tlwhs, online_ids))
if show_image or save_dir is not None:
online_im = vis.plot_tracking(img0, online_tlwhs, online_ids, frame_id=frame_id,
fps=1. / timer.average_time)
if show_image:
cv2.imshow('online_im', online_im)
if save_dir is not None:
cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
frame_id += 1
# save results
write_results(result_filename, results, data_type)
return frame_id, timer.average_time, timer.calls
def main(opt, data_root='/data/MOT16/train', det_root=None, seqs=('MOT16-05',), exp_name='demo',
save_images=False, save_videos=False, show_image=True):
logger.setLevel(logging.INFO)
result_root = os.path.join(data_root, '..', 'results', exp_name)
mkdir_if_missing(result_root)
data_type = 'mot'
# run tracking
accs = []
n_frame = 0
timer_avgs, timer_calls = [], []
for seq in seqs:
output_dir = os.path.join(data_root, '..','outputs', exp_name, seq) if save_images or save_videos else None
logger.info('start seq: {}'.format(seq))
dataloader = datasets.LoadImages(osp.join(data_root, seq, 'img1'), opt.img_size)
result_filename = os.path.join(result_root, '{}.txt'.format(seq))
meta_info = open(os.path.join(data_root, seq, 'seqinfo.ini')).read()
frame_rate = int(meta_info[meta_info.find('frameRate')+10:meta_info.find('\nseqLength')])
nf, ta, tc = eval_seq(opt, dataloader, data_type, result_filename,
save_dir=output_dir, show_image=show_image, frame_rate=frame_rate)
n_frame += nf
timer_avgs.append(ta)
timer_calls.append(tc)
# eval
logger.info('Evaluate seq: {}'.format(seq))
evaluator = Evaluator(data_root, seq, data_type)
accs.append(evaluator.eval_file(result_filename))
if save_videos:
output_video_path = osp.join(output_dir, '{}.mp4'.format(seq))
cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(output_dir, output_video_path)
os.system(cmd_str)
timer_avgs = np.asarray(timer_avgs)
timer_calls = np.asarray(timer_calls)
all_time = np.dot(timer_avgs, timer_calls)
avg_time = all_time / np.sum(timer_calls)
logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(all_time, 1.0 / avg_time))
# get summary
metrics = mm.metrics.motchallenge_metrics
mh = mm.metrics.create()
summary = Evaluator.get_summary(accs, seqs, metrics)
strsummary = mm.io.render_summary(
summary,
formatters=mh.formatters,
namemap=mm.io.motchallenge_metric_names
)
print(strsummary)
Evaluator.save_summary(summary, os.path.join(result_root, 'summary_{}.xlsx'.format(exp_name)))
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='track.py')
parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')
parser.add_argument('--weights', type=str, default='weights/latest.pt', help='path to weights file')
parser.add_argument('--img-size', type=int, default=(1088, 608), help='size of each image dimension')
parser.add_argument('--iou-thres', type=float, default=0.5, help='iou threshold required to qualify as detected')
parser.add_argument('--conf-thres', type=float, default=0.5, help='object confidence threshold')
parser.add_argument('--nms-thres', type=float, default=0.4, help='iou threshold for non-maximum suppression')
parser.add_argument('--min-box-area', type=float, default=200, help='filter out tiny boxes')
parser.add_argument('--track-buffer', type=int, default=30, help='tracking buffer')
parser.add_argument('--test-mot16', action='store_true', help='run tracking on the MOT16 test set')
parser.add_argument('--save-images', action='store_true', help='save tracking results (image)')
parser.add_argument('--save-videos', action='store_true', help='save tracking results (video)')
opt = parser.parse_args()
print(opt, end='\n\n')
if not opt.test_mot16:
seqs_str = '''KITTI-13
KITTI-17
ADL-Rundle-6
PETS09-S2L1
TUD-Campus
TUD-Stadtmitte'''
data_root = '/home/wangzd/datasets/MOT/MOT15/train'
else:
seqs_str = '''MOT16-01
MOT16-03
MOT16-06
MOT16-07
MOT16-08
MOT16-12
MOT16-14'''
data_root = '/home/wangzd/datasets/MOT/MOT16/images/test'
seqs = [seq.strip() for seq in seqs_str.split()]
main(opt,
data_root=data_root,
seqs=seqs,
exp_name=opt.weights.split('/')[-2],
show_image=False,
save_images=opt.save_images,
save_videos=opt.save_videos)
import os
import os.path as osp
import cv2
import logging
import argparse
import motmetrics as mm
from tracker.multitracker import JDETracker
from utils import visualization as vis
from utils.log import logger
from utils.timer import Timer
from utils.evaluation import Evaluator
import utils.datasets as datasets
import torch
from utils.utils import *
def write_results(filename, results, data_type):
if data_type == 'mot':
save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n'
elif data_type == 'kitti':
save_format = '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n'
else:
raise ValueError(data_type)
with open(filename, 'w') as f:
for frame_id, tlwhs, track_ids in results:
if data_type == 'kitti':
frame_id -= 1
for tlwh, track_id in zip(tlwhs, track_ids):
if track_id < 0:
continue
x1, y1, w, h = tlwh
x2, y2 = x1 + w, y1 + h
line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h)
f.write(line)
logger.info('save results to {}'.format(filename))
def eval_seq(opt, dataloader, data_type, result_filename, save_dir=None, show_image=True, frame_rate=30):
if save_dir:
mkdir_if_missing(save_dir)
tracker = JDETracker(opt, frame_rate=frame_rate)
timer = Timer()
results = []
frame_id = 0
for path, img, img0 in dataloader:
if frame_id % 20 == 0:
logger.info('Processing frame {} ({:.2f} fps)'.format(frame_id, 1./max(1e-5, timer.average_time)))
# run tracking
timer.tic()
blob = torch.from_numpy(img).cuda().unsqueeze(0)
online_targets = tracker.update(blob, img0)
online_tlwhs = []
online_ids = []
for t in online_targets:
tlwh = t.tlwh
tid = t.track_id
vertical = tlwh[2] / tlwh[3] > 1.6
if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
online_tlwhs.append(tlwh)
online_ids.append(tid)
timer.toc()
# save results
results.append((frame_id + 1, online_tlwhs, online_ids))
if show_image or save_dir is not None:
online_im = vis.plot_tracking(img0, online_tlwhs, online_ids, frame_id=frame_id,
fps=1. / timer.average_time)
if show_image:
cv2.imshow('online_im', online_im)
if save_dir is not None:
cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
frame_id += 1
# save results
write_results(result_filename, results, data_type)
return frame_id, timer.average_time, timer.calls
def main(opt, data_root='/data/MOT16/train', det_root=None, seqs=('MOT16-05',), exp_name='demo',
save_images=False, save_videos=False, show_image=True):
logger.setLevel(logging.INFO)
result_root = os.path.join(data_root, '..', 'results', exp_name)
mkdir_if_missing(result_root)
data_type = 'mot'
# run tracking
accs = []
n_frame = 0
timer_avgs, timer_calls = [], []
for seq in seqs:
output_dir = os.path.join(data_root, '..','outputs', exp_name, seq) if save_images or save_videos else None
logger.info('start seq: {}'.format(seq))
dataloader = datasets.LoadImages(osp.join(data_root, seq, 'img1'), opt.img_size)
result_filename = os.path.join(result_root, '{}.txt'.format(seq))
meta_info = open(os.path.join(data_root, seq, 'seqinfo.ini')).read()
frame_rate = int(meta_info[meta_info.find('frameRate')+10:meta_info.find('\nseqLength')])
nf, ta, tc = eval_seq(opt, dataloader, data_type, result_filename,
save_dir=output_dir, show_image=show_image, frame_rate=frame_rate)
n_frame += nf
timer_avgs.append(ta)
timer_calls.append(tc)
# eval
logger.info('Evaluate seq: {}'.format(seq))
evaluator = Evaluator(data_root, seq, data_type)
accs.append(evaluator.eval_file(result_filename))
if save_videos:
output_video_path = osp.join(output_dir, '{}.mp4'.format(seq))
cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(output_dir, output_video_path)
os.system(cmd_str)
timer_avgs = np.asarray(timer_avgs)
timer_calls = np.asarray(timer_calls)
all_time = np.dot(timer_avgs, timer_calls)
avg_time = all_time / np.sum(timer_calls)
logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(all_time, 1.0 / avg_time))
# get summary
metrics = mm.metrics.motchallenge_metrics
mh = mm.metrics.create()
summary = Evaluator.get_summary(accs, seqs, metrics)
strsummary = mm.io.render_summary(
summary,
formatters=mh.formatters,
namemap=mm.io.motchallenge_metric_names
)
print(strsummary)
Evaluator.save_summary(summary, os.path.join(result_root, 'summary_{}.xlsx'.format(exp_name)))
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='track.py')
parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')
parser.add_argument('--weights', type=str, default='weights/latest.pt', help='path to weights file')
parser.add_argument('--img-size', type=int, default=(1088, 608), help='size of each image dimension')
parser.add_argument('--iou-thres', type=float, default=0.5, help='iou threshold required to qualify as detected')
parser.add_argument('--conf-thres', type=float, default=0.5, help='object confidence threshold')
parser.add_argument('--nms-thres', type=float, default=0.4, help='iou threshold for non-maximum suppression')
parser.add_argument('--min-box-area', type=float, default=200, help='filter out tiny boxes')
parser.add_argument('--track-buffer', type=int, default=30, help='tracking buffer')
parser.add_argument('--test-mot16', action='store_true', help='evaluate on the MOT16 test sequences instead of the MOT17 train split')
parser.add_argument('--save-images', action='store_true', help='save tracking results (image)')
parser.add_argument('--save-videos', action='store_true', help='save tracking results (video)')
opt = parser.parse_args()
print(opt, end='\n\n')
if not opt.test_mot16:
#seqs_str = '''KITTI-13
# KITTI-17
# ADL-Rundle-6
# PETS09-S2L1
# TUD-Campus
# TUD-Stadtmitte'''
#data_root = '/home/wangzd/datasets/MOT/MOT15/train'
seqs_str = '''MOT17-02-SDP
MOT17-04-SDP
MOT17-05-SDP
MOT17-09-SDP
MOT17-10-SDP
MOT17-11-SDP
MOT17-13-SDP
'''
data_root = '/home/wangzd/datasets/MOT/MOT17/images/train'
else:
seqs_str = '''MOT16-01
MOT16-03
MOT16-06
MOT16-07
MOT16-08
MOT16-12
MOT16-14'''
data_root = '/home/wangzd/datasets/MOT/MOT16/images/test'
seqs = [seq.strip() for seq in seqs_str.split()]
main(opt,
data_root=data_root,
seqs=seqs,
exp_name=opt.weights.split('/')[-2],
show_image=False,
save_images=opt.save_images,
save_videos=opt.save_videos)

View File

@ -1,53 +1,53 @@
import numpy as np
from collections import OrderedDict
class TrackState(object):
New = 0
Tracked = 1
Lost = 2
Removed = 3
class BaseTrack(object):
_count = 0
track_id = 0
is_activated = False
state = TrackState.New
history = OrderedDict()
features = []
curr_feature = None
score = 0
start_frame = 0
frame_id = 0
time_since_update = 0
# multi-camera
location = (np.inf, np.inf)
@property
def end_frame(self):
return self.frame_id
@staticmethod
def next_id():
BaseTrack._count += 1
return BaseTrack._count
def activate(self, *args):
raise NotImplementedError
def predict(self):
raise NotImplementedError
def update(self, *args, **kwargs):
raise NotImplementedError
def mark_lost(self):
self.state = TrackState.Lost
def mark_removed(self):
self.state = TrackState.Removed
import numpy as np
from collections import OrderedDict
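# TrackState enumerates the lifecycle of a track (New -> Tracked -> Lost -> Removed), while
# BaseTrack holds the shared id counter, activation flag and lifecycle hooks that concrete
# tracks (e.g. STrack) implement.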
class TrackState(object):
New = 0
Tracked = 1
Lost = 2
Removed = 3
class BaseTrack(object):
_count = 0
track_id = 0
is_activated = False
state = TrackState.New
history = OrderedDict()
features = []
curr_feature = None
score = 0
start_frame = 0
frame_id = 0
time_since_update = 0
# multi-camera
location = (np.inf, np.inf)
@property
def end_frame(self):
return self.frame_id
@staticmethod
def next_id():
BaseTrack._count += 1
return BaseTrack._count
def activate(self, *args):
raise NotImplementedError
def predict(self):
raise NotImplementedError
def update(self, *args, **kwargs):
raise NotImplementedError
def mark_lost(self):
self.state = TrackState.Lost
def mark_removed(self):
self.state = TrackState.Removed

View File

@ -1,122 +1,122 @@
import cv2
import numpy as np
import scipy
from scipy.spatial.distance import cdist
from sklearn.utils import linear_assignment_
from utils.cython_bbox import bbox_ious
from utils import kalman_filter
import time
def merge_matches(m1, m2, shape):
O,P,Q = shape
m1 = np.asarray(m1)
m2 = np.asarray(m2)
M1 = scipy.sparse.coo_matrix((np.ones(len(m1)), (m1[:, 0], m1[:, 1])), shape=(O, P))
M2 = scipy.sparse.coo_matrix((np.ones(len(m2)), (m2[:, 0], m2[:, 1])), shape=(P, Q))
mask = M1*M2
match = mask.nonzero()
match = list(zip(match[0], match[1]))
unmatched_O = tuple(set(range(O)) - set([i for i, j in match]))
unmatched_Q = tuple(set(range(Q)) - set([j for i, j in match]))
return match, unmatched_O, unmatched_Q
def _indices_to_matches(cost_matrix, indices, thresh):
matched_cost = cost_matrix[tuple(zip(*indices))]
matched_mask = (matched_cost <= thresh)
matches = indices[matched_mask]
unmatched_a = tuple(set(range(cost_matrix.shape[0])) - set(matches[:, 0]))
unmatched_b = tuple(set(range(cost_matrix.shape[1])) - set(matches[:, 1]))
return matches, unmatched_a, unmatched_b
def linear_assignment(cost_matrix, thresh):
"""
Simple linear assignment
:type cost_matrix: np.ndarray
:type thresh: float
:return: matches, unmatched_a, unmatched_b
"""
if cost_matrix.size == 0:
return np.empty((0, 2), dtype=int), tuple(range(cost_matrix.shape[0])), tuple(range(cost_matrix.shape[1]))
cost_matrix[cost_matrix > thresh] = thresh + 1e-4
indices = linear_assignment_.linear_assignment(cost_matrix)
return _indices_to_matches(cost_matrix, indices, thresh)
def ious(atlbrs, btlbrs):
"""
Compute cost based on IoU
:type atlbrs: list[tlbr] | np.ndarray
:type btlbrs: list[tlbr] | np.ndarray
:rtype ious np.ndarray
"""
ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float)
if ious.size == 0:
return ious
ious = bbox_ious(
np.ascontiguousarray(atlbrs, dtype=np.float),
np.ascontiguousarray(btlbrs, dtype=np.float)
)
return ious
def iou_distance(atracks, btracks):
"""
Compute cost based on IoU
:type atracks: list[STrack]
:type btracks: list[STrack]
:rtype cost_matrix np.ndarray
"""
if (len(atracks)>0 and isinstance(atracks[0], np.ndarray)) or (len(btracks) > 0 and isinstance(btracks[0], np.ndarray)):
atlbrs = atracks
btlbrs = btracks
else:
atlbrs = [track.tlbr for track in atracks]
btlbrs = [track.tlbr for track in btracks]
_ious = ious(atlbrs, btlbrs)
cost_matrix = 1 - _ious
return cost_matrix
def embedding_distance(tracks, detections, metric='cosine'):
"""
:param tracks: list[STrack]
:param detections: list[BaseTrack]
:param metric:
:return: cost_matrix np.ndarray
"""
cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float)
if cost_matrix.size == 0:
return cost_matrix
det_features = np.asarray([track.curr_feat for track in detections], dtype=np.float)
for i, track in enumerate(tracks):
cost_matrix[i, :] = np.maximum(0.0, cdist(track.smooth_feat.reshape(1,-1), det_features, metric))
return cost_matrix
def gate_cost_matrix(kf, cost_matrix, tracks, detections, only_position=False):
if cost_matrix.size == 0:
return cost_matrix
gating_dim = 2 if only_position else 4
gating_threshold = kalman_filter.chi2inv95[gating_dim]
measurements = np.asarray([det.to_xyah() for det in detections])
for row, track in enumerate(tracks):
gating_distance = kf.gating_distance(
track.mean, track.covariance, measurements, only_position)
cost_matrix[row, gating_distance > gating_threshold] = np.inf
return cost_matrix
import cv2
import numpy as np
import scipy
from scipy.spatial.distance import cdist
from sklearn.utils import linear_assignment_
from cython_bbox import bbox_overlaps as bbox_ious
from utils import kalman_filter
import time
def merge_matches(m1, m2, shape):
O,P,Q = shape
m1 = np.asarray(m1)
m2 = np.asarray(m2)
M1 = scipy.sparse.coo_matrix((np.ones(len(m1)), (m1[:, 0], m1[:, 1])), shape=(O, P))
M2 = scipy.sparse.coo_matrix((np.ones(len(m2)), (m2[:, 0], m2[:, 1])), shape=(P, Q))
mask = M1*M2
match = mask.nonzero()
match = list(zip(match[0], match[1]))
unmatched_O = tuple(set(range(O)) - set([i for i, j in match]))
unmatched_Q = tuple(set(range(Q)) - set([j for i, j in match]))
return match, unmatched_O, unmatched_Q
def _indices_to_matches(cost_matrix, indices, thresh):
matched_cost = cost_matrix[tuple(zip(*indices))]
matched_mask = (matched_cost <= thresh)
matches = indices[matched_mask]
unmatched_a = tuple(set(range(cost_matrix.shape[0])) - set(matches[:, 0]))
unmatched_b = tuple(set(range(cost_matrix.shape[1])) - set(matches[:, 1]))
return matches, unmatched_a, unmatched_b
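# linear_assignment solves the bipartite matching on the cost matrix: costs above thresh are
# pushed just past the threshold so the corresponding assignments are rejected afterwards,
# and the result is split into matched index pairs plus unmatched rows and columns.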
def linear_assignment(cost_matrix, thresh):
"""
Simple linear assignment
:type cost_matrix: np.ndarray
:type thresh: float
:return: matches, unmatched_a, unmatched_b
"""
if cost_matrix.size == 0:
return np.empty((0, 2), dtype=int), tuple(range(cost_matrix.shape[0])), tuple(range(cost_matrix.shape[1]))
cost_matrix[cost_matrix > thresh] = thresh + 1e-4
indices = linear_assignment_.linear_assignment(cost_matrix)
return _indices_to_matches(cost_matrix, indices, thresh)
def ious(atlbrs, btlbrs):
"""
Compute cost based on IoU
:type atlbrs: list[tlbr] | np.ndarray
:type btlbrs: list[tlbr] | np.ndarray
:rtype ious np.ndarray
"""
ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float)
if ious.size == 0:
return ious
ious = bbox_ious(
np.ascontiguousarray(atlbrs, dtype=np.float),
np.ascontiguousarray(btlbrs, dtype=np.float)
)
return ious
def iou_distance(atracks, btracks):
"""
Compute cost based on IoU
:type atracks: list[STrack]
:type btracks: list[STrack]
:rtype cost_matrix np.ndarray
"""
if (len(atracks)>0 and isinstance(atracks[0], np.ndarray)) or (len(btracks) > 0 and isinstance(btracks[0], np.ndarray)):
atlbrs = atracks
btlbrs = btracks
else:
atlbrs = [track.tlbr for track in atracks]
btlbrs = [track.tlbr for track in btracks]
_ious = ious(atlbrs, btlbrs)
cost_matrix = 1 - _ious
return cost_matrix
def embedding_distance(tracks, detections, metric='cosine'):
"""
:param tracks: list[STrack]
:param detections: list[BaseTrack]
:param metric:
:return: cost_matrix np.ndarray
"""
cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float)
if cost_matrix.size == 0:
return cost_matrix
det_features = np.asarray([track.curr_feat for track in detections], dtype=np.float)
for i, track in enumerate(tracks):
cost_matrix[i, :] = np.maximum(0.0, cdist(track.smooth_feat.reshape(1,-1), det_features, metric))
return cost_matrix
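# gate_cost_matrix invalidates track/detection pairs whose Kalman gating (Mahalanobis)
# distance exceeds the chi-square 95% threshold for the chosen state dimension by setting
# their cost to infinity.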
def gate_cost_matrix(kf, cost_matrix, tracks, detections, only_position=False):
if cost_matrix.size == 0:
return cost_matrix
gating_dim = 2 if only_position else 4
gating_threshold = kalman_filter.chi2inv95[gating_dim]
measurements = np.asarray([det.to_xyah() for det in detections])
for row, track in enumerate(tracks):
gating_distance = kf.gating_distance(
track.mean, track.covariance, measurements, only_position)
cost_matrix[row, gating_distance > gating_threshold] = np.inf
return cost_matrix

View File

@ -1,335 +1,335 @@
import numpy as np
from numba import jit
from collections import deque
import itertools
import os
import os.path as osp
import time
import torch
from utils.utils import *
from utils.log import logger
from utils.kalman_filter import KalmanFilter
from models import *
from tracker import matching
from .basetrack import BaseTrack, TrackState
class STrack(BaseTrack):
def __init__(self, tlwh, score, temp_feat, buffer_size=30):
# waiting to be activated
self._tlwh = np.asarray(tlwh, dtype=np.float)
self.kalman_filter = None
self.mean, self.covariance = None, None
self.is_activated = False
self.score = score
self.tracklet_len = 0
self.smooth_feat = None
self.update_features(temp_feat)
self.features = deque([], maxlen=buffer_size)
self.alpha = 0.9
def update_features(self, feat):
self.curr_feat = feat
if self.smooth_feat is None:
self.smooth_feat = feat
else:
self.smooth_feat = self.alpha *self.smooth_feat + (1-self.alpha) * feat
self.features.append(feat)
self.smooth_feat /= np.linalg.norm(self.smooth_feat)
def predict(self):
mean_state = self.mean.copy()
if self.state != TrackState.Tracked:
mean_state[7] = 0
self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance)
def activate(self, kalman_filter, frame_id):
"""Start a new tracklet"""
self.kalman_filter = kalman_filter
self.track_id = self.next_id()
self.mean, self.covariance = self.kalman_filter.initiate(self.tlwh_to_xyah(self._tlwh))
self.tracklet_len = 0
self.state = TrackState.Tracked
#self.is_activated = True
self.frame_id = frame_id
self.start_frame = frame_id
def re_activate(self, new_track, frame_id, new_id=False):
self.mean, self.covariance = self.kalman_filter.update(
self.mean, self.covariance, self.tlwh_to_xyah(new_track.tlwh)
)
self.update_features(new_track.curr_feat)
self.tracklet_len = 0
self.state = TrackState.Tracked
self.is_activated = True
self.frame_id = frame_id
if new_id:
self.track_id = self.next_id()
def update(self, new_track, frame_id, update_feature=True):
"""
Update a matched track
:type new_track: STrack
:type frame_id: int
:type update_feature: bool
:return:
"""
self.frame_id = frame_id
self.tracklet_len += 1
new_tlwh = new_track.tlwh
self.mean, self.covariance = self.kalman_filter.update(
self.mean, self.covariance, self.tlwh_to_xyah(new_tlwh))
self.state = TrackState.Tracked
self.is_activated = True
self.score = new_track.score
if update_feature:
self.update_features(new_track.curr_feat)
@property
@jit
def tlwh(self):
"""Get current position in bounding box format `(top left x, top left y,
width, height)`.
"""
if self.mean is None:
return self._tlwh.copy()
ret = self.mean[:4].copy()
ret[2] *= ret[3]
ret[:2] -= ret[2:] / 2
return ret
@property
@jit
def tlbr(self):
"""Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
`(top left, bottom right)`.
"""
ret = self.tlwh.copy()
ret[2:] += ret[:2]
return ret
@staticmethod
@jit
def tlwh_to_xyah(tlwh):
"""Convert bounding box to format `(center x, center y, aspect ratio,
height)`, where the aspect ratio is `width / height`.
"""
ret = np.asarray(tlwh).copy()
ret[:2] += ret[2:] / 2
ret[2] /= ret[3]
return ret
def to_xyah(self):
return self.tlwh_to_xyah(self.tlwh)
@staticmethod
@jit
def tlbr_to_tlwh(tlbr):
ret = np.asarray(tlbr).copy()
ret[2:] -= ret[:2]
return ret
@staticmethod
@jit
def tlwh_to_tlbr(tlwh):
ret = np.asarray(tlwh).copy()
ret[2:] += ret[:2]
return ret
def __repr__(self):
return 'OT_{}_({}-{})'.format(self.track_id, self.start_frame, self.end_frame)
class JDETracker(object):
def __init__(self, opt, frame_rate=30):
self.opt = opt
self.model = Darknet(opt.cfg, opt.img_size, nID=14455)
# load_darknet_weights(self.model, opt.weights)
self.model.load_state_dict(torch.load(opt.weights, map_location='cpu')['model'], strict=False)
self.model.cuda().eval()
self.tracked_stracks = [] # type: list[STrack]
self.lost_stracks = [] # type: list[STrack]
self.removed_stracks = [] # type: list[STrack]
self.frame_id = 0
self.det_thresh = opt.conf_thres
self.buffer_size = int(frame_rate / 30.0 * opt.track_buffer)
self.max_time_lost = self.buffer_size
self.kalman_filter = KalmanFilter()
def update(self, im_blob, img0):
self.frame_id += 1
activated_starcks = []
refind_stracks = []
lost_stracks = []
removed_stracks = []
t1 = time.time()
''' Step 1: Network forward, get detections & embeddings'''
with torch.no_grad():
pred = self.model(im_blob)
pred = pred[pred[:, :, 4] > self.opt.conf_thres]
if len(pred) > 0:
dets = non_max_suppression(pred.unsqueeze(0), self.opt.conf_thres, self.opt.nms_thres)[0].cpu()
scale_coords(self.opt.img_size, dets[:, :4], img0.shape).round()
'''Detections'''
detections = [STrack(STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], f.numpy(), 30) for
(tlbrs, f) in zip(dets[:, :5], dets[:, -self.model.emb_dim:])]
else:
detections = []
t2 = time.time()
# print('Forward: {} s'.format(t2-t1))
''' Split existing tracked_stracks into unconfirmed and confirmed tracks'''
unconfirmed = []
tracked_stracks = [] # type: list[STrack]
for track in self.tracked_stracks:
if not track.is_activated:
unconfirmed.append(track)
else:
tracked_stracks.append(track)
''' Step 2: First association, with embedding'''
strack_pool = joint_stracks(tracked_stracks, self.lost_stracks)
# Predict the current location with KF
for strack in strack_pool:
strack.predict()
dists = matching.embedding_distance(strack_pool, detections)
dists = matching.gate_cost_matrix(self.kalman_filter, dists, strack_pool, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.7)
for itracked, idet in matches:
track = strack_pool[itracked]
det = detections[idet]
if track.state == TrackState.Tracked:
track.update(detections[idet], self.frame_id)
activated_starcks.append(track)
else:
track.re_activate(det, self.frame_id, new_id=False)
refind_stracks.append(track)
''' Step 3: Second association, with IOU'''
detections = [detections[i] for i in u_detection]
r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state==TrackState.Tracked ]
dists = matching.iou_distance(r_tracked_stracks, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.5)
for itracked, idet in matches:
track = r_tracked_stracks[itracked]
det = detections[idet]
if track.state == TrackState.Tracked:
track.update(det, self.frame_id)
activated_starcks.append(track)
else:
track.re_activate(det, self.frame_id, new_id=False)
refind_stracks.append(track)
for it in u_track:
track = r_tracked_stracks[it]
if not track.state == TrackState.Lost:
track.mark_lost()
lost_stracks.append(track)
'''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''
detections = [detections[i] for i in u_detection]
dists = matching.iou_distance(unconfirmed, detections)
matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7)
for itracked, idet in matches:
unconfirmed[itracked].update(detections[idet], self.frame_id)
activated_starcks.append(unconfirmed[itracked])
for it in u_unconfirmed:
track = unconfirmed[it]
track.mark_removed()
removed_stracks.append(track)
""" Step 4: Init new stracks"""
for inew in u_detection:
track = detections[inew]
if track.score < self.det_thresh:
continue
track.activate(self.kalman_filter, self.frame_id)
activated_starcks.append(track)
""" Step 5: Update state"""
for track in self.lost_stracks:
if self.frame_id - track.end_frame > self.max_time_lost:
track.mark_removed()
removed_stracks.append(track)
t4 = time.time()
# print('Remaining match {} s'.format(t4-t3))
self.tracked_stracks = [t for t in self.tracked_stracks if t.state == TrackState.Tracked]
self.tracked_stracks = joint_stracks(self.tracked_stracks, activated_starcks)
self.tracked_stracks = joint_stracks(self.tracked_stracks, refind_stracks)
# self.lost_stracks = [t for t in self.lost_stracks if t.state == TrackState.Lost] # type: list[STrack]
self.lost_stracks = sub_stracks(self.lost_stracks, self.tracked_stracks)
self.lost_stracks.extend(lost_stracks)
self.lost_stracks = sub_stracks(self.lost_stracks, self.removed_stracks)
self.removed_stracks.extend(removed_stracks)
self.tracked_stracks, self.lost_stracks = remove_duplicate_stracks(self.tracked_stracks, self.lost_stracks)
# get scores of lost tracks
output_stracks = [track for track in self.tracked_stracks if track.is_activated]
logger.debug('===========Frame {}=========='.format(self.frame_id))
logger.debug('Activated: {}'.format([track.track_id for track in activated_starcks]))
logger.debug('Refind: {}'.format([track.track_id for track in refind_stracks]))
logger.debug('Lost: {}'.format([track.track_id for track in lost_stracks]))
logger.debug('Removed: {}'.format([track.track_id for track in removed_stracks]))
t5 = time.time()
# print('Final {} s'.format(t5-t4))
return output_stracks
def joint_stracks(tlista, tlistb):
exists = {}
res = []
for t in tlista:
exists[t.track_id] = 1
res.append(t)
for t in tlistb:
tid = t.track_id
if not exists.get(tid, 0):
exists[tid] = 1
res.append(t)
return res
def sub_stracks(tlista, tlistb):
stracks = {}
for t in tlista:
stracks[t.track_id] = t
for t in tlistb:
tid = t.track_id
if stracks.get(tid, 0):
del stracks[tid]
return list(stracks.values())
def remove_duplicate_stracks(stracksa, stracksb):
pdist = matching.iou_distance(stracksa, stracksb)
pairs = np.where(pdist<0.15)
dupa, dupb = list(), list()
for p,q in zip(*pairs):
timep = stracksa[p].frame_id - stracksa[p].start_frame
timeq = stracksb[q].frame_id - stracksb[q].start_frame
if timep > timeq:
dupb.append(q)
else:
dupa.append(p)
resa = [t for i,t in enumerate(stracksa) if not i in dupa]
resb = [t for i,t in enumerate(stracksb) if not i in dupb]
return resa, resb
import numpy as np
from numba import jit
from collections import deque
import itertools
import os
import os.path as osp
import time
import torch
from utils.utils import *
from utils.log import logger
from utils.kalman_filter import KalmanFilter
from models import *
from tracker import matching
from .basetrack import BaseTrack, TrackState
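# STrack couples a Kalman-filtered bounding box with an exponentially smoothed appearance
# embedding (alpha = 0.9, re-normalized after every update) on top of the BaseTrack lifecycle.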
class STrack(BaseTrack):
def __init__(self, tlwh, score, temp_feat, buffer_size=30):
# waiting to be activated
self._tlwh = np.asarray(tlwh, dtype=np.float)
self.kalman_filter = None
self.mean, self.covariance = None, None
self.is_activated = False
self.score = score
self.tracklet_len = 0
self.smooth_feat = None
self.update_features(temp_feat)
self.features = deque([], maxlen=buffer_size)
self.alpha = 0.9
def update_features(self, feat):
self.curr_feat = feat
if self.smooth_feat is None:
self.smooth_feat = feat
else:
self.smooth_feat = self.alpha *self.smooth_feat + (1-self.alpha) * feat
self.features.append(feat)
self.smooth_feat /= np.linalg.norm(self.smooth_feat)
def predict(self):
mean_state = self.mean.copy()
if self.state != TrackState.Tracked:
mean_state[7] = 0
self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance)
def activate(self, kalman_filter, frame_id):
"""Start a new tracklet"""
self.kalman_filter = kalman_filter
self.track_id = self.next_id()
self.mean, self.covariance = self.kalman_filter.initiate(self.tlwh_to_xyah(self._tlwh))
self.tracklet_len = 0
self.state = TrackState.Tracked
#self.is_activated = True
self.frame_id = frame_id
self.start_frame = frame_id
def re_activate(self, new_track, frame_id, new_id=False):
self.mean, self.covariance = self.kalman_filter.update(
self.mean, self.covariance, self.tlwh_to_xyah(new_track.tlwh)
)
self.update_features(new_track.curr_feat)
self.tracklet_len = 0
self.state = TrackState.Tracked
self.is_activated = True
self.frame_id = frame_id
if new_id:
self.track_id = self.next_id()
def update(self, new_track, frame_id, update_feature=True):
"""
Update a matched track
:type new_track: STrack
:type frame_id: int
:type update_feature: bool
:return:
"""
self.frame_id = frame_id
self.tracklet_len += 1
new_tlwh = new_track.tlwh
self.mean, self.covariance = self.kalman_filter.update(
self.mean, self.covariance, self.tlwh_to_xyah(new_tlwh))
self.state = TrackState.Tracked
self.is_activated = True
self.score = new_track.score
if update_feature:
self.update_features(new_track.curr_feat)
@property
@jit
def tlwh(self):
"""Get current position in bounding box format `(top left x, top left y,
width, height)`.
"""
if self.mean is None:
return self._tlwh.copy()
ret = self.mean[:4].copy()
ret[2] *= ret[3]
ret[:2] -= ret[2:] / 2
return ret
@property
@jit
def tlbr(self):
"""Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
`(top left, bottom right)`.
"""
ret = self.tlwh.copy()
ret[2:] += ret[:2]
return ret
@staticmethod
@jit
def tlwh_to_xyah(tlwh):
"""Convert bounding box to format `(center x, center y, aspect ratio,
height)`, where the aspect ratio is `width / height`.
"""
ret = np.asarray(tlwh).copy()
ret[:2] += ret[2:] / 2
ret[2] /= ret[3]
return ret
def to_xyah(self):
return self.tlwh_to_xyah(self.tlwh)
@staticmethod
@jit
def tlbr_to_tlwh(tlbr):
ret = np.asarray(tlbr).copy()
ret[2:] -= ret[:2]
return ret
@staticmethod
@jit
def tlwh_to_tlbr(tlwh):
ret = np.asarray(tlwh).copy()
ret[2:] += ret[:2]
return ret
def __repr__(self):
return 'OT_{}_({}-{})'.format(self.track_id, self.start_frame, self.end_frame)
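# JDETracker runs the JDE model once per frame and associates detections with existing tracks
# in two stages: first by appearance-embedding distance gated by the Kalman filter, then by
# IoU for the tracks left unmatched; remaining high-confidence detections start new tracks,
# and tracks lost for longer than the buffer are removed.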
class JDETracker(object):
def __init__(self, opt, frame_rate=30):
self.opt = opt
self.model = Darknet(opt.cfg, opt.img_size, nID=14455)
# load_darknet_weights(self.model, opt.weights)
self.model.load_state_dict(torch.load(opt.weights, map_location='cpu')['model'], strict=False)
self.model.cuda().eval()
self.tracked_stracks = [] # type: list[STrack]
self.lost_stracks = [] # type: list[STrack]
self.removed_stracks = [] # type: list[STrack]
self.frame_id = 0
self.det_thresh = opt.conf_thres
self.buffer_size = int(frame_rate / 30.0 * opt.track_buffer)
self.max_time_lost = self.buffer_size
self.kalman_filter = KalmanFilter()
def update(self, im_blob, img0):
self.frame_id += 1
activated_starcks = []
refind_stracks = []
lost_stracks = []
removed_stracks = []
t1 = time.time()
''' Step 1: Network forward, get detections & embeddings'''
with torch.no_grad():
pred = self.model(im_blob)
pred = pred[pred[:, :, 4] > self.opt.conf_thres]
if len(pred) > 0:
dets = non_max_suppression(pred.unsqueeze(0), self.opt.conf_thres, self.opt.nms_thres)[0].cpu()
scale_coords(self.opt.img_size, dets[:, :4], img0.shape).round()
'''Detections'''
detections = [STrack(STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], f.numpy(), 30) for
(tlbrs, f) in zip(dets[:, :5], dets[:, -self.model.emb_dim:])]
else:
detections = []
t2 = time.time()
# print('Forward: {} s'.format(t2-t1))
''' Split existing tracked_stracks into unconfirmed and confirmed tracks'''
unconfirmed = []
tracked_stracks = [] # type: list[STrack]
for track in self.tracked_stracks:
if not track.is_activated:
unconfirmed.append(track)
else:
tracked_stracks.append(track)
''' Step 2: First association, with embedding'''
strack_pool = joint_stracks(tracked_stracks, self.lost_stracks)
# Predict the current location with KF
for strack in strack_pool:
strack.predict()
dists = matching.embedding_distance(strack_pool, detections)
dists = matching.gate_cost_matrix(self.kalman_filter, dists, strack_pool, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.7)
for itracked, idet in matches:
track = strack_pool[itracked]
det = detections[idet]
if track.state == TrackState.Tracked:
track.update(detections[idet], self.frame_id)
activated_starcks.append(track)
else:
track.re_activate(det, self.frame_id, new_id=False)
refind_stracks.append(track)
''' Step 3: Second association, with IOU'''
detections = [detections[i] for i in u_detection]
r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state==TrackState.Tracked ]
dists = matching.iou_distance(r_tracked_stracks, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.5)
for itracked, idet in matches:
track = r_tracked_stracks[itracked]
det = detections[idet]
if track.state == TrackState.Tracked:
track.update(det, self.frame_id)
activated_starcks.append(track)
else:
track.re_activate(det, self.frame_id, new_id=False)
refind_stracks.append(track)
for it in u_track:
track = r_tracked_stracks[it]
if not track.state == TrackState.Lost:
track.mark_lost()
lost_stracks.append(track)
'''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''
detections = [detections[i] for i in u_detection]
dists = matching.iou_distance(unconfirmed, detections)
matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7)
for itracked, idet in matches:
unconfirmed[itracked].update(detections[idet], self.frame_id)
activated_starcks.append(unconfirmed[itracked])
for it in u_unconfirmed:
track = unconfirmed[it]
track.mark_removed()
removed_stracks.append(track)
""" Step 4: Init new stracks"""
for inew in u_detection:
track = detections[inew]
if track.score < self.det_thresh:
continue
track.activate(self.kalman_filter, self.frame_id)
activated_starcks.append(track)
""" Step 5: Update state"""
for track in self.lost_stracks:
if self.frame_id - track.end_frame > self.max_time_lost:
track.mark_removed()
removed_stracks.append(track)
t4 = time.time()
# print('Remaining match {} s'.format(t4-t3))
self.tracked_stracks = [t for t in self.tracked_stracks if t.state == TrackState.Tracked]
self.tracked_stracks = joint_stracks(self.tracked_stracks, activated_starcks)
self.tracked_stracks = joint_stracks(self.tracked_stracks, refind_stracks)
# self.lost_stracks = [t for t in self.lost_stracks if t.state == TrackState.Lost] # type: list[STrack]
self.lost_stracks = sub_stracks(self.lost_stracks, self.tracked_stracks)
self.lost_stracks.extend(lost_stracks)
self.lost_stracks = sub_stracks(self.lost_stracks, self.removed_stracks)
self.removed_stracks.extend(removed_stracks)
self.tracked_stracks, self.lost_stracks = remove_duplicate_stracks(self.tracked_stracks, self.lost_stracks)
# get scores of lost tracks
output_stracks = [track for track in self.tracked_stracks if track.is_activated]
logger.debug('===========Frame {}=========='.format(self.frame_id))
logger.debug('Activated: {}'.format([track.track_id for track in activated_starcks]))
logger.debug('Refind: {}'.format([track.track_id for track in refind_stracks]))
logger.debug('Lost: {}'.format([track.track_id for track in lost_stracks]))
logger.debug('Removed: {}'.format([track.track_id for track in removed_stracks]))
t5 = time.time()
# print('Final {} s'.format(t5-t4))
return output_stracks
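# List helpers used by the tracker: joint_stracks merges two track lists without duplicating
# track ids, sub_stracks removes the second list from the first, and remove_duplicate_stracks
# resolves pairs with IoU distance below 0.15 by keeping the track that has been tracked longer.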
def joint_stracks(tlista, tlistb):
exists = {}
res = []
for t in tlista:
exists[t.track_id] = 1
res.append(t)
for t in tlistb:
tid = t.track_id
if not exists.get(tid, 0):
exists[tid] = 1
res.append(t)
return res
def sub_stracks(tlista, tlistb):
stracks = {}
for t in tlista:
stracks[t.track_id] = t
for t in tlistb:
tid = t.track_id
if stracks.get(tid, 0):
del stracks[tid]
return list(stracks.values())
def remove_duplicate_stracks(stracksa, stracksb):
pdist = matching.iou_distance(stracksa, stracksb)
pairs = np.where(pdist<0.15)
dupa, dupb = list(), list()
for p,q in zip(*pairs):
timep = stracksa[p].frame_id - stracksa[p].start_frame
timeq = stracksb[q].frame_id - stracksb[q].start_frame
if timep > timeq:
dupb.append(q)
else:
dupa.append(p)
resa = [t for i,t in enumerate(stracksa) if not i in dupa]
resb = [t for i,t in enumerate(stracksb) if not i in dupb]
return resa, resb

381 train.py
View File

@ -1,191 +1,190 @@
import argparse
import json
import time
import test
from models import *
from utils.datasets import JointDataset, collate_fn
from utils.utils import *
from utils.log import logger
from torchvision.transforms import transforms as T
def train(
cfg,
data_cfg,
img_size=(1088,608),
resume=False,
epochs=100,
batch_size=16,
accumulated_batches=1,
freeze_backbone=False,
opt=None,
):
weights = 'weights'
mkdir_if_missing(weights)
latest = osp.join(weights, 'latest.pt')
torch.backends.cudnn.benchmark = True # unsuitable for multiscale
# Configure run
f = open(data_cfg)
data_config = json.load(f)
trainset_paths = data_config['train']
dataset_root = data_config['root']
f.close()
transforms = T.Compose([T.ToTensor()])
# Get dataloader
dataset = JointDataset(dataset_root, trainset_paths, img_size, augment=True, transforms=transforms)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True,
num_workers=8, pin_memory=True, drop_last=True, collate_fn=collate_fn)
# Initialize model
model = Darknet(cfg, img_size, dataset.nID)
cutoff = -1 # index of the last backbone layer covered by the pretrained weights (set below)
start_epoch = 0
if resume:
checkpoint = torch.load(latest, map_location='cpu')
# Load weights to resume from
model.load_state_dict(checkpoint['model'])
model.cuda().train()
# Set optimizer
optimizer = torch.optim.SGD(filter(lambda x: x.requires_grad, model.parameters()), lr=opt.lr, momentum=.9)
start_epoch = checkpoint['epoch'] + 1
if checkpoint['optimizer'] is not None:
optimizer.load_state_dict(checkpoint['optimizer'])
del checkpoint # current, saved
else:
# Initialize model with backbone (optional)
if cfg.endswith('yolov3.cfg'):
load_darknet_weights(model, osp.join(weights ,'darknet53.conv.74'))
cutoff = 75
elif cfg.endswith('yolov3-tiny.cfg'):
load_darknet_weights(model, osp.join(weights , 'yolov3-tiny.conv.15'))
cutoff = 15
model.cuda().train()
# Set optimizer
optimizer = torch.optim.SGD(filter(lambda x: x.requires_grad, model.parameters()), lr=opt.lr, momentum=.9, weight_decay=1e-4)
model = torch.nn.DataParallel(model)
# Set scheduler
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
milestones=[int(0.5*opt.epochs), int(0.75*opt.epochs)], gamma=0.1)
# An important trick for detection: freeze bn during fine-tuning
if not opt.unfreeze_bn:
for i, (name, p) in enumerate(model.named_parameters()):
p.requires_grad = False if 'batch_norm' in name else True
model_info(model)
t0 = time.time()
for epoch in range(epochs):
epoch += start_epoch
logger.info(('%8s%12s' + '%10s' * 6) % (
'Epoch', 'Batch', 'box', 'conf', 'id', 'total', 'nTargets', 'time'))
# Update scheduler (automatic)
scheduler.step()
# Freeze darknet53.conv.74 for first epoch
if freeze_backbone and (epoch < 2):
for i, (name, p) in enumerate(model.named_parameters()):
if int(name.split('.')[2]) < cutoff: # if layer < 75
p.requires_grad = False if (epoch == 0) else True
ui = -1
rloss = defaultdict(float) # running loss
optimizer.zero_grad()
for i, (imgs, targets, _, _, targets_len) in enumerate(dataloader):
if sum([len(x) for x in targets]) < 1: # if no targets continue
continue
# SGD burn-in
burnin = min(1000, len(dataloader))
if (epoch == 0) & (i <= burnin):
lr = opt.lr * (i / burnin) **4
for g in optimizer.param_groups:
g['lr'] = lr
# Compute loss, compute gradient, update parameters
loss, components = model(imgs.cuda(), targets.cuda(), targets_len.cuda())
components = torch.mean(components.view(-1, 5),dim=0)
loss = torch.mean(loss)
loss.backward()
# accumulate gradient for x batches before optimizing
if ((i + 1) % accumulated_batches == 0) or (i == len(dataloader) - 1):
optimizer.step()
optimizer.zero_grad()
# Running epoch-means of tracked metrics
ui += 1
for ii, key in enumerate(model.module.loss_names):
rloss[key] = (rloss[key] * ui + components[ii]) / (ui + 1)
s = ('%8s%12s' + '%10.3g' * 6) % (
'%g/%g' % (epoch, epochs - 1),
'%g/%g' % (i, len(dataloader) - 1),
rloss['box'], rloss['conf'],
rloss['id'],rloss['loss'],
rloss['nT'], time.time() - t0)
t0 = time.time()
if i % opt.print_interval == 0:
logger.info(s)
# Save latest checkpoint
checkpoint = {'epoch': epoch,
'model': model.module.state_dict(),
'optimizer': optimizer.state_dict()}
torch.save(checkpoint, latest)
# Calculate mAP
if epoch % opt.test_interval ==0:
with torch.no_grad():
mAP, R, P = test.test(cfg, data_cfg, weights=latest, batch_size=batch_size, img_size=img_size, print_interval=40, nID=dataset.nID)
test.test_emb(cfg, data_cfg, weights=latest, batch_size=batch_size, img_size=img_size, print_interval=40, nID=dataset.nID)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=30, help='number of epochs')
parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch')
parser.add_argument('--accumulated-batches', type=int, default=1, help='number of batches before optimizer step')
parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')
parser.add_argument('--data-cfg', type=str, default='cfg/ccmcpe.json', help='coco.data file path')
parser.add_argument('--img-size', type=int, default=(1088, 608), help='pixels')
parser.add_argument('--resume', action='store_true', help='resume training flag')
parser.add_argument('--print-interval', type=int, default=40, help='print interval')
parser.add_argument('--test-interval', type=int, default=9, help='test interval')
parser.add_argument('--lr', type=float, default=1e-2, help='init lr')
parser.add_argument('--unfreeze-bn', action='store_true', help='unfreeze bn')
opt = parser.parse_args()
init_seeds()
train(
opt.cfg,
opt.data_cfg,
img_size=opt.img_size,
resume=opt.resume,
epochs=opt.epochs,
batch_size=opt.batch_size,
accumulated_batches=opt.accumulated_batches,
opt=opt,
)
import argparse
import json
import time
import test
from models import *
from utils.datasets import JointDataset, collate_fn
from utils.utils import *
from utils.log import logger
from torchvision.transforms import transforms as T
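# train builds the JointDataset described by the JSON data config, optionally resumes from
# weights/latest.pt or initializes the backbone from darknet weights, and runs SGD with an
# lr burn-in over the first iterations of epoch 0, gradient accumulation, a MultiStep LR
# schedule at 50%/75% of the epochs, frozen batch-norm parameters by default, and periodic
# mAP/embedding evaluation through test.py.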
def train(
cfg,
data_cfg,
img_size=(1088,608),
resume=False,
epochs=100,
batch_size=16,
accumulated_batches=1,
freeze_backbone=False,
opt=None,
):
weights = 'weights'
mkdir_if_missing(weights)
latest = osp.join(weights, 'latest.pt')
torch.backends.cudnn.benchmark = True # unsuitable for multiscale
# Configure run
f = open(data_cfg)
data_config = json.load(f)
trainset_paths = data_config['train']
dataset_root = data_config['root']
f.close()
transforms = T.Compose([T.ToTensor()])
# Get dataloader
dataset = JointDataset(dataset_root, trainset_paths, img_size, augment=True, transforms=transforms)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True,
num_workers=8, pin_memory=True, drop_last=True, collate_fn=collate_fn)
# Initialize model
model = Darknet(cfg, img_size, dataset.nID)
cutoff = -1 # index of the last backbone layer covered by the pretrained weights (set below)
start_epoch = 0
if resume:
checkpoint = torch.load(latest, map_location='cpu')
# Load weights to resume from
model.load_state_dict(checkpoint['model'])
model.cuda().train()
# Set optimizer
optimizer = torch.optim.SGD(filter(lambda x: x.requires_grad, model.parameters()), lr=opt.lr, momentum=.9)
start_epoch = checkpoint['epoch'] + 1
if checkpoint['optimizer'] is not None:
optimizer.load_state_dict(checkpoint['optimizer'])
del checkpoint # current, saved
else:
# Initialize model with backbone (optional)
if cfg.endswith('yolov3.cfg'):
load_darknet_weights(model, osp.join(weights ,'darknet53.conv.74'))
cutoff = 75
elif cfg.endswith('yolov3-tiny.cfg'):
load_darknet_weights(model, osp.join(weights , 'yolov3-tiny.conv.15'))
cutoff = 15
model.cuda().train()
# Set optimizer
optimizer = torch.optim.SGD(filter(lambda x: x.requires_grad, model.parameters()), lr=opt.lr, momentum=.9, weight_decay=1e-4)
model = torch.nn.DataParallel(model)
# Set scheduler
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
milestones=[int(0.5*opt.epochs), int(0.75*opt.epochs)], gamma=0.1)
# An important trick for detection: freeze bn during fine-tuning
if not opt.unfreeze_bn:
for i, (name, p) in enumerate(model.named_parameters()):
p.requires_grad = False if 'batch_norm' in name else True
model_info(model)
t0 = time.time()
for epoch in range(epochs):
epoch += start_epoch
logger.info(('%8s%12s' + '%10s' * 6) % (
'Epoch', 'Batch', 'box', 'conf', 'id', 'total', 'nTargets', 'time'))
# Freeze darknet53.conv.74 for first epoch
if freeze_backbone and (epoch < 2):
for i, (name, p) in enumerate(model.named_parameters()):
if int(name.split('.')[2]) < cutoff: # if layer < 75
p.requires_grad = False if (epoch == 0) else True
ui = -1
rloss = defaultdict(float) # running loss
optimizer.zero_grad()
for i, (imgs, targets, _, _, targets_len) in enumerate(dataloader):
if sum([len(x) for x in targets]) < 1: # if no targets continue
continue
# SGD burn-in
burnin = min(1000, len(dataloader))
if (epoch == 0) & (i <= burnin):
lr = opt.lr * (i / burnin) **4
for g in optimizer.param_groups:
g['lr'] = lr
# Compute loss, compute gradient, update parameters
loss, components = model(imgs.cuda(), targets.cuda(), targets_len.cuda())
components = torch.mean(components.view(-1, 5),dim=0)
loss = torch.mean(loss)
loss.backward()
# accumulate gradient for x batches before optimizing
if ((i + 1) % accumulated_batches == 0) or (i == len(dataloader) - 1):
optimizer.step()
optimizer.zero_grad()
# Running epoch-means of tracked metrics
ui += 1
for ii, key in enumerate(model.module.loss_names):
rloss[key] = (rloss[key] * ui + components[ii]) / (ui + 1)
s = ('%8s%12s' + '%10.3g' * 6) % (
'%g/%g' % (epoch, epochs - 1),
'%g/%g' % (i, len(dataloader) - 1),
rloss['box'], rloss['conf'],
rloss['id'],rloss['loss'],
rloss['nT'], time.time() - t0)
t0 = time.time()
if i % opt.print_interval == 0:
logger.info(s)
# Save latest checkpoint
checkpoint = {'epoch': epoch,
'model': model.module.state_dict(),
'optimizer': optimizer.state_dict()}
torch.save(checkpoint, latest)
# Calculate mAP
if epoch % opt.test_interval ==0:
with torch.no_grad():
mAP, R, P = test.test(cfg, data_cfg, weights=latest, batch_size=batch_size, img_size=img_size, print_interval=40, nID=dataset.nID)
test.test_emb(cfg, data_cfg, weights=latest, batch_size=batch_size, img_size=img_size, print_interval=40, nID=dataset.nID)
# Call scheduler.step() after optimizer.step() with PyTorch >= 1.1.0
scheduler.step()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=30, help='number of epochs')
parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch')
parser.add_argument('--accumulated-batches', type=int, default=1, help='number of batches before optimizer step')
parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')
parser.add_argument('--data-cfg', type=str, default='cfg/ccmcpe.json', help='coco.data file path')
parser.add_argument('--img-size', type=int, default=(1088, 608), help='pixels')
parser.add_argument('--resume', action='store_true', help='resume training flag')
parser.add_argument('--print-interval', type=int, default=40, help='print interval')
parser.add_argument('--test-interval', type=int, default=9, help='test interval')
parser.add_argument('--lr', type=float, default=1e-2, help='init lr')
parser.add_argument('--unfreeze-bn', action='store_true', help='unfreeze bn')
opt = parser.parse_args()
init_seeds()
train(
opt.cfg,
opt.data_cfg,
img_size=opt.img_size,
resume=opt.resume,
epochs=opt.epochs,
batch_size=opt.batch_size,
accumulated_batches=opt.accumulated_batches,
opt=opt,
)

View File

@ -1,411 +1,410 @@
import glob
import math
import os
import os.path as osp
import random
import time
from collections import OrderedDict
import cv2
import numpy as np
import torch
from torch.utils.data import Dataset
from utils.utils import xyxy2xywh
class LoadImages: # for inference
def __init__(self, path, img_size=(1088, 608)):
if os.path.isdir(path):
image_format = ['.jpg', '.jpeg', '.png', '.tif']
self.files = sorted(glob.glob('%s/*.*' % path))
self.files = list(filter(lambda x: os.path.splitext(x)[1].lower() in image_format, self.files))
elif os.path.isfile(path):
self.files = [path]
self.nF = len(self.files) # number of image files
self.width = img_size[0]
self.height = img_size[1]
self.count = 0
assert self.nF > 0, 'No images found in ' + path
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if self.count == self.nF:
raise StopIteration
img_path = self.files[self.count]
# Read image
img0 = cv2.imread(img_path) # BGR
assert img0 is not None, 'Failed to load ' + img_path
# Padded resize
img, _, _, _ = letterbox(img0, height=self.height, width=self.width)
# Normalize RGB
img = img[:, :, ::-1].transpose(2, 0, 1)
img = np.ascontiguousarray(img, dtype=np.float32)
img /= 255.0
# cv2.imwrite(img_path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return img_path, img, img0
def __getitem__(self, idx):
idx = idx % self.nF
img_path = self.files[idx]
# Read image
img0 = cv2.imread(img_path) # BGR
assert img0 is not None, 'Failed to load ' + img_path
# Padded resize
img, _, _, _ = letterbox(img0, height=self.height, width=self.width)
# Normalize RGB
img = img[:, :, ::-1].transpose(2, 0, 1)
img = np.ascontiguousarray(img, dtype=np.float32)
img /= 255.0
return img_path, img, img0
def __len__(self):
return self.nF # number of files
class LoadVideo: # for inference
def __init__(self, path, img_size=(1088, 608)):
self.cap = cv2.VideoCapture(path)
self.frame_rate = int(round(self.cap.get(cv2.CAP_PROP_FPS)))
self.vw = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
self.vh = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
self.vn = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
self.width = img_size[0]
self.height = img_size[1]
self.count = 0
self.w, self.h = self.get_size(self.vw, self.vh, self.width, self.height)
print('Length of the video: {:d} frames'.format(self.vn))
def get_size(self, vw, vh, dw, dh):
wa, ha = float(dw) / vw, float(dh) / vh
a = min(wa, ha)
return int(vw *a), int(vh*a)
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if self.count == len(self):
raise StopIteration
# Read image
res, img0 = self.cap.read() # BGR
assert img0 is not None, 'Failed to load frame {:d}'.format(self.count)
img0 = cv2.resize(img0, (self.w, self.h))
# Padded resize
img, _, _, _ = letterbox(img0, height=self.height, width=self.width)
# Normalize RGB
img = img[:, :, ::-1].transpose(2, 0, 1)
img = np.ascontiguousarray(img, dtype=np.float32)
img /= 255.0
# cv2.imwrite(img_path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return self.count, img, img0
def __len__(self):
return self.vn # number of frames
class LoadImagesAndLabels: # for training
def __init__(self, path, img_size=(1088,608), augment=False, transforms=None):
with open(path, 'r') as file:
self.img_files = file.readlines()
self.img_files = [x.replace('\n', '') for x in self.img_files]
self.img_files = list(filter(lambda x: len(x) > 0, self.img_files))
self.label_files = [x.replace('images', 'labels_with_ids').replace('.png', '.txt').replace('.jpg', '.txt')
for x in self.img_files]
self.nF = len(self.img_files) # number of image files
self.width = img_size[0]
self.height = img_size[1]
self.augment = augment
self.transforms = transforms
def __getitem__(self, files_index):
img_path = self.img_files[files_index]
label_path = self.label_files[files_index]
return self.get_data(img_path, label_path)
def get_data(self, img_path, label_path):
height = self.height
width = self.width
img = cv2.imread(img_path) # BGR
if img is None:
raise ValueError('File corrupt {}'.format(img_path))
augment_hsv = True
if self.augment and augment_hsv:
# SV augmentation by 50%
fraction = 0.50
img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
S = img_hsv[:, :, 1].astype(np.float32)
V = img_hsv[:, :, 2].astype(np.float32)
a = (random.random() * 2 - 1) * fraction + 1
S *= a
if a > 1:
np.clip(S, a_min=0, a_max=255, out=S)
a = (random.random() * 2 - 1) * fraction + 1
V *= a
if a > 1:
np.clip(V, a_min=0, a_max=255, out=V)
img_hsv[:, :, 1] = S.astype(np.uint8)
img_hsv[:, :, 2] = V.astype(np.uint8)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)
h, w, _ = img.shape
img, ratio, padw, padh = letterbox(img, height=height, width=width)
# Load labels
if os.path.isfile(label_path):
labels0 = np.loadtxt(label_path, dtype=np.float32).reshape(-1, 6)
# Normalized xywh to pixel xyxy format
labels = labels0.copy()
labels[:, 2] = ratio * w * (labels0[:, 2] - labels0[:, 4] / 2) + padw
labels[:, 3] = ratio * h * (labels0[:, 3] - labels0[:, 5] / 2) + padh
labels[:, 4] = ratio * w * (labels0[:, 2] + labels0[:, 4] / 2) + padw
labels[:, 5] = ratio * h * (labels0[:, 3] + labels0[:, 5] / 2) + padh
else:
labels = np.array([])
# Augment image and labels
if self.augment:
img, labels, M = random_affine(img, labels, degrees=(-5, 5), translate=(0.10, 0.10), scale=(0.50, 1.20))
plotFlag = False
if plotFlag:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.figure(figsize=(50, 50))
plt.imshow(img[:, :, ::-1])
plt.plot(labels[:, [1, 3, 3, 1, 1]].T, labels[:, [2, 2, 4, 4, 2]].T, '.-')
plt.axis('off')
plt.savefig('test.jpg')
time.sleep(10)
nL = len(labels)
if nL > 0:
# convert xyxy to xywh
labels[:, 2:6] = xyxy2xywh(labels[:, 2:6].copy()) #/ height
labels[:, 2] /= width
labels[:, 3] /= height
labels[:, 4] /= width
labels[:, 5] /= height
if self.augment:
# random left-right flip
lr_flip = True
if lr_flip & (random.random() > 0.5):
img = np.fliplr(img)
if nL > 0:
labels[:, 2] = 1 - labels[:, 2]
img = np.ascontiguousarray(img[ :, :, ::-1]) # BGR to RGB
if self.transforms is not None:
img = self.transforms(img)
return img, labels, img_path, (h, w)
def __len__(self):
return self.nF # number of image samples
def letterbox(img, height=608, width=1088, color=(127.5, 127.5, 127.5)): # resize a rectangular image to a padded rectangular
shape = img.shape[:2] # shape = [height, width]
ratio = min(float(height)/shape[0], float(width)/shape[1])
new_shape = (round(shape[1] * ratio), round(shape[0] * ratio)) # new_shape = [width, height]
dw = (width - new_shape[0]) / 2 # width padding
dh = (height - new_shape[1]) / 2 # height padding
top, bottom = round(dh - 0.1), round(dh + 0.1)
left, right = round(dw - 0.1), round(dw + 0.1)
img = cv2.resize(img, new_shape, interpolation=cv2.INTER_AREA) # resized, no border
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # padded rectangular
return img, ratio, dw, dh
def random_affine(img, targets=None, degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-2, 2),
borderValue=(127.5, 127.5, 127.5)):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4
border = 0 # width of added border (optional)
height = img.shape[0]
width = img.shape[1]
# Rotation and Scale
R = np.eye(3)
a = random.random() * (degrees[1] - degrees[0]) + degrees[0]
# a += random.choice([-180, -90, 0, 90]) # 90deg rotations added to small rotations
s = random.random() * (scale[1] - scale[0]) + scale[0]
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)
# Translation
T = np.eye(3)
T[0, 2] = (random.random() * 2 - 1) * translate[0] * img.shape[0] + border # x translation (pixels)
T[1, 2] = (random.random() * 2 - 1) * translate[1] * img.shape[1] + border # y translation (pixels)
# Shear
S = np.eye(3)
S[0, 1] = math.tan((random.random() * (shear[1] - shear[0]) + shear[0]) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan((random.random() * (shear[1] - shear[0]) + shear[0]) * math.pi / 180) # y shear (deg)
M = S @ T @ R # Combined rotation matrix. ORDER IS IMPORTANT HERE!!
imw = cv2.warpPerspective(img, M, dsize=(width, height), flags=cv2.INTER_LINEAR,
borderValue=borderValue) # BGR order borderValue
# Return warped points also
if targets is not None:
if len(targets) > 0:
n = targets.shape[0]
points = targets[:, 2:6].copy()
area0 = (points[:, 2] - points[:, 0]) * (points[:, 3] - points[:, 1])
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = points[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = (xy @ M.T)[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# apply angle-based reduction
radians = a * math.pi / 180
reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
x = (xy[:, 2] + xy[:, 0]) / 2
y = (xy[:, 3] + xy[:, 1]) / 2
w = (xy[:, 2] - xy[:, 0]) * reduction
h = (xy[:, 3] - xy[:, 1]) * reduction
xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# reject warped points outside of image
np.clip(xy[:, 0], 0, width, out=xy[:, 0])
np.clip(xy[:, 2], 0, width, out=xy[:, 2])
np.clip(xy[:, 1], 0, height, out=xy[:, 1])
np.clip(xy[:, 3], 0, height, out=xy[:, 3])
w = xy[:, 2] - xy[:, 0]
h = xy[:, 3] - xy[:, 1]
area = w * h
ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16))
i = (w > 4) & (h > 4) & (area / (area0 + 1e-16) > 0.1) & (ar < 10)
targets = targets[i]
targets[:, 2:6] = xy[i]
return imw, targets, M
else:
return imw
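# collate_fn stacks the images of a batch and pads every sample's variable-length label
# tensor to the largest box count in the batch, also returning the per-sample label counts.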
def collate_fn(batch):
imgs, labels, paths, sizes = zip(*batch)
batch_size = len(labels)
imgs = torch.stack(imgs, 0)
max_box_len = max([l.shape[0] for l in labels])
labels = [torch.from_numpy(l) for l in labels]
filled_labels = torch.zeros(batch_size, max_box_len, 6)
labels_len = torch.zeros(batch_size)
for i in range(batch_size):
isize = labels[i].shape[0]
if len(labels[i])>0:
filled_labels[i, :isize, :] = labels[i]
labels_len[i] = isize
return imgs, filled_labels, paths, sizes, labels_len.unsqueeze(1)
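# JointDataset chains several per-dataset image lists and shifts each dataset's identity
# labels by tid_start_index so that track ids stay globally unique across datasets.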
class JointDataset(LoadImagesAndLabels): # for training
def __init__(self, root, paths, img_size=(1088,608), augment=False, transforms=None):
dataset_names = paths.keys()
self.img_files = OrderedDict()
self.label_files = OrderedDict()
self.tid_num = OrderedDict()
self.tid_start_index = OrderedDict()
for ds, path in paths.items():
with open(path, 'r') as file:
self.img_files[ds] = file.readlines()
self.img_files[ds] = [osp.join(root, x.strip()) for x in self.img_files[ds]]
self.img_files[ds] = list(filter(lambda x: len(x) > 0, self.img_files[ds]))
self.label_files[ds] = [x.replace('images', 'labels_with_ids').replace('.png', '.txt').replace('.jpg', '.txt')
for x in self.img_files[ds]]
for ds, label_paths in self.label_files.items():
max_index = -1
for lp in label_paths:
lb = np.loadtxt(lp)
if len(lb) < 1:
continue
if len(lb.shape) < 2:
img_max = lb[1]
else:
img_max = np.max(lb[:,1])
if img_max >max_index:
max_index = img_max
self.tid_num[ds] = max_index + 1
last_index = 0
for i, (k, v) in enumerate(self.tid_num.items()):
self.tid_start_index[k] = last_index
last_index += v
self.nID = int(last_index+1)
self.nds = [len(x) for x in self.img_files.values()]
self.cds = [sum(self.nds[:i]) for i in range(len(self.nds))]
self.nF = sum(self.nds)
self.width = img_size[0]
self.height = img_size[1]
self.augment = augment
self.transforms = transforms
print('='*80)
print('dataset summary')
print(self.tid_num)
print('total # identities:', self.nID)
print('start index')
print(self.tid_start_index)
print('='*80)
def __getitem__(self, files_index):
for i, c in enumerate(self.cds):
if files_index >= c:
ds = list(self.label_files.keys())[i]
start_index = c
img_path = self.img_files[ds][files_index - start_index]
label_path = self.label_files[ds][files_index - start_index]
imgs, labels, img_path, (h, w) = self.get_data(img_path, label_path)
for i, _ in enumerate(labels):
if labels[i,1] > -1:
labels[i,1] += self.tid_start_index[ds]
return imgs, labels, img_path, (h, w)
import glob
import math
import os
import os.path as osp
import random
import time
from collections import OrderedDict
import cv2
import numpy as np
import torch
from torch.utils.data import Dataset
from utils.utils import xyxy2xywh
class LoadImages: # for inference
def __init__(self, path, img_size=(1088, 608)):
if os.path.isdir(path):
image_format = ['.jpg', '.jpeg', '.png', '.tif']
self.files = sorted(glob.glob('%s/*.*' % path))
self.files = list(filter(lambda x: os.path.splitext(x)[1].lower() in image_format, self.files))
elif os.path.isfile(path):
self.files = [path]
self.nF = len(self.files) # number of image files
self.width = img_size[0]
self.height = img_size[1]
self.count = 0
assert self.nF > 0, 'No images found in ' + path
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if self.count == self.nF:
raise StopIteration
img_path = self.files[self.count]
# Read image
img0 = cv2.imread(img_path) # BGR
assert img0 is not None, 'Failed to load ' + img_path
# Padded resize
img, _, _, _ = letterbox(img0, height=self.height, width=self.width)
# Normalize RGB
img = img[:, :, ::-1].transpose(2, 0, 1)
img = np.ascontiguousarray(img, dtype=np.float32)
img /= 255.0
# cv2.imwrite(img_path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return img_path, img, img0
def __getitem__(self, idx):
idx = idx % self.nF
img_path = self.files[idx]
# Read image
img0 = cv2.imread(img_path) # BGR
assert img0 is not None, 'Failed to load ' + img_path
# Padded resize
img, _, _, _ = letterbox(img0, height=self.height, width=self.width)
# Normalize RGB
img = img[:, :, ::-1].transpose(2, 0, 1)
img = np.ascontiguousarray(img, dtype=np.float32)
img /= 255.0
return img_path, img, img0
def __len__(self):
return self.nF # number of files
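# LoadVideo wraps cv2.VideoCapture and yields letterboxed, RGB-normalized frames resized to
# fit the requested network input size while preserving the aspect ratio.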
class LoadVideo: # for inference
def __init__(self, path, img_size=(1088, 608)):
self.cap = cv2.VideoCapture(path)
self.frame_rate = int(round(self.cap.get(cv2.CAP_PROP_FPS)))
self.vw = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
self.vh = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
self.vn = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
self.width = img_size[0]
self.height = img_size[1]
self.count = 0
self.w, self.h = self.get_size(self.vw, self.vh, self.width, self.height)
print('Length of the video: {:d} frames'.format(self.vn))
def get_size(self, vw, vh, dw, dh):
wa, ha = float(dw) / vw, float(dh) / vh
a = min(wa, ha)
return int(vw *a), int(vh*a)
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if self.count == len(self):
raise StopIteration
# Read image
res, img0 = self.cap.read() # BGR
assert img0 is not None, 'Failed to load frame {:d}'.format(self.count)
img0 = cv2.resize(img0, (self.w, self.h))
# Padded resize
img, _, _, _ = letterbox(img0, height=self.height, width=self.width)
# Normalize RGB
img = img[:, :, ::-1].transpose(2, 0, 1)
img = np.ascontiguousarray(img, dtype=np.float32)
img /= 255.0
# cv2.imwrite(img_path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return self.count, img, img0
def __len__(self):
return self.vn # number of frames
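# LoadImagesAndLabels reads an image list file, derives each label path by swapping 'images'
# for 'labels_with_ids' and the image extension for '.txt', and, when augmentation is enabled,
# applies HSV jitter, a random affine warp and random horizontal flips to image and boxes.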
class LoadImagesAndLabels: # for training
def __init__(self, path, img_size=(1088,608), augment=False, transforms=None):
with open(path, 'r') as file:
self.img_files = file.readlines()
self.img_files = [x.replace('\n', '') for x in self.img_files]
self.img_files = list(filter(lambda x: len(x) > 0, self.img_files))
self.label_files = [x.replace('images', 'labels_with_ids').replace('.png', '.txt').replace('.jpg', '.txt')
for x in self.img_files]
self.nF = len(self.img_files) # number of image files
self.width = img_size[0]
self.height = img_size[1]
self.augment = augment
self.transforms = transforms
def __getitem__(self, files_index):
img_path = self.img_files[files_index]
label_path = self.label_files[files_index]
return self.get_data(img_path, label_path)
def get_data(self, img_path, label_path):
height = self.height
width = self.width
img = cv2.imread(img_path) # BGR
if img is None:
raise ValueError('File corrupt {}'.format(img_path))
augment_hsv = True
if self.augment and augment_hsv:
# SV augmentation by 50%
fraction = 0.50
img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
S = img_hsv[:, :, 1].astype(np.float32)
V = img_hsv[:, :, 2].astype(np.float32)
a = (random.random() * 2 - 1) * fraction + 1
S *= a
if a > 1:
np.clip(S, a_min=0, a_max=255, out=S)
a = (random.random() * 2 - 1) * fraction + 1
V *= a
if a > 1:
np.clip(V, a_min=0, a_max=255, out=V)
img_hsv[:, :, 1] = S.astype(np.uint8)
img_hsv[:, :, 2] = V.astype(np.uint8)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)
h, w, _ = img.shape
img, ratio, padw, padh = letterbox(img, height=height, width=width)
# Load labels
if os.path.isfile(label_path):
labels0 = np.loadtxt(label_path, dtype=np.float32).reshape(-1, 6)
# Normalized xywh to pixel xyxy format
labels = labels0.copy()
labels[:, 2] = ratio * w * (labels0[:, 2] - labels0[:, 4] / 2) + padw
labels[:, 3] = ratio * h * (labels0[:, 3] - labels0[:, 5] / 2) + padh
labels[:, 4] = ratio * w * (labels0[:, 2] + labels0[:, 4] / 2) + padw
labels[:, 5] = ratio * h * (labels0[:, 3] + labels0[:, 5] / 2) + padh
else:
labels = np.array([])
# Augment image and labels
if self.augment:
img, labels, M = random_affine(img, labels, degrees=(-5, 5), translate=(0.10, 0.10), scale=(0.50, 1.20))
plotFlag = False
if plotFlag:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.figure(figsize=(50, 50))
plt.imshow(img[:, :, ::-1])
plt.plot(labels[:, [1, 3, 3, 1, 1]].T, labels[:, [2, 2, 4, 4, 2]].T, '.-')
plt.axis('off')
plt.savefig('test.jpg')
time.sleep(10)
nL = len(labels)
if nL > 0:
# convert xyxy to xywh
labels[:, 2:6] = xyxy2xywh(labels[:, 2:6].copy()) #/ height
labels[:, 2] /= width
labels[:, 3] /= height
labels[:, 4] /= width
labels[:, 5] /= height
if self.augment:
# random left-right flip
lr_flip = True
if lr_flip & (random.random() > 0.5):
img = np.fliplr(img)
if nL > 0:
labels[:, 2] = 1 - labels[:, 2]
img = np.ascontiguousarray(img[ :, :, ::-1]) # BGR to RGB
if self.transforms is not None:
img = self.transforms(img)
return img, labels, img_path, (h, w)
def __len__(self):
return self.nF # number of batches
def letterbox(img, height=608, width=1088, color=(127.5, 127.5, 127.5)): # resize a rectangular image to a padded rectangle
shape = img.shape[:2] # shape = [height, width]
ratio = min(float(height)/shape[0], float(width)/shape[1])
new_shape = (round(shape[1] * ratio), round(shape[0] * ratio)) # new_shape = [width, height]
dw = (width - new_shape[0]) / 2 # width padding
dh = (height - new_shape[1]) / 2 # height padding
top, bottom = round(dh - 0.1), round(dh + 0.1)
left, right = round(dw - 0.1), round(dw + 0.1)
img = cv2.resize(img, new_shape, interpolation=cv2.INTER_AREA) # resized, no border
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # padded rectangular
return img, ratio, dw, dh
def random_affine(img, targets=None, degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-2, 2),
borderValue=(127.5, 127.5, 127.5)):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4
border = 0 # width of added border (optional)
height = img.shape[0]
width = img.shape[1]
# Rotation and Scale
R = np.eye(3)
a = random.random() * (degrees[1] - degrees[0]) + degrees[0]
# a += random.choice([-180, -90, 0, 90]) # 90deg rotations added to small rotations
s = random.random() * (scale[1] - scale[0]) + scale[0]
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)
# Translation
T = np.eye(3)
T[0, 2] = (random.random() * 2 - 1) * translate[0] * img.shape[0] + border # x translation (pixels)
T[1, 2] = (random.random() * 2 - 1) * translate[1] * img.shape[1] + border # y translation (pixels)
# Shear
S = np.eye(3)
S[0, 1] = math.tan((random.random() * (shear[1] - shear[0]) + shear[0]) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan((random.random() * (shear[1] - shear[0]) + shear[0]) * math.pi / 180) # y shear (deg)
M = S @ T @ R # Combined rotation matrix. ORDER IS IMPORTANT HERE!!
imw = cv2.warpPerspective(img, M, dsize=(width, height), flags=cv2.INTER_LINEAR,
borderValue=borderValue) # BGR order borderValue
# Return warped points also
if targets is not None:
if len(targets) > 0:
n = targets.shape[0]
points = targets[:, 2:6].copy()
area0 = (points[:, 2] - points[:, 0]) * (points[:, 3] - points[:, 1])
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = points[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = (xy @ M.T)[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# apply angle-based reduction
radians = a * math.pi / 180
reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
x = (xy[:, 2] + xy[:, 0]) / 2
y = (xy[:, 3] + xy[:, 1]) / 2
w = (xy[:, 2] - xy[:, 0]) * reduction
h = (xy[:, 3] - xy[:, 1]) * reduction
xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# reject warped points outside of image
np.clip(xy[:, 0], 0, width, out=xy[:, 0])
np.clip(xy[:, 2], 0, width, out=xy[:, 2])
np.clip(xy[:, 1], 0, height, out=xy[:, 1])
np.clip(xy[:, 3], 0, height, out=xy[:, 3])
w = xy[:, 2] - xy[:, 0]
h = xy[:, 3] - xy[:, 1]
area = w * h
ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16))
i = (w > 4) & (h > 4) & (area / (area0 + 1e-16) > 0.1) & (ar < 10)
targets = targets[i]
targets[:, 2:6] = xy[i]
return imw, targets, M
else:
return imw
def collate_fn(batch):
imgs, labels, paths, sizes = zip(*batch)
batch_size = len(labels)
imgs = torch.stack(imgs, 0)
max_box_len = max([l.shape[0] for l in labels])
labels = [torch.from_numpy(l) for l in labels]
filled_labels = torch.zeros(batch_size, max_box_len, 6)
labels_len = torch.zeros(batch_size)
for i in range(batch_size):
isize = labels[i].shape[0]
if len(labels[i])>0:
filled_labels[i, :isize, :] = labels[i]
labels_len[i] = isize
return imgs, filled_labels, paths, sizes, labels_len.unsqueeze(1)
class JointDataset(LoadImagesAndLabels): # for training
def __init__(self, root, paths, img_size=(1088,608), augment=False, transforms=None):
dataset_names = paths.keys()
self.img_files = OrderedDict()
self.label_files = OrderedDict()
self.tid_num = OrderedDict()
self.tid_start_index = OrderedDict()
for ds, path in paths.items():
with open(path, 'r') as file:
self.img_files[ds] = file.readlines()
self.img_files[ds] = [osp.join(root, x.strip()) for x in self.img_files[ds]]
self.img_files[ds] = list(filter(lambda x: len(x) > 0, self.img_files[ds]))
self.label_files[ds] = [x.replace('images', 'labels_with_ids').replace('.png', '.txt').replace('.jpg', '.txt')
for x in self.img_files[ds]]
for ds, label_paths in self.label_files.items():
max_index = -1
for lp in label_paths:
lb = np.loadtxt(lp)
if len(lb) < 1:
continue
if len(lb.shape) < 2:
img_max = lb[1]
else:
img_max = np.max(lb[:,1])
if img_max >max_index:
max_index = img_max
self.tid_num[ds] = max_index + 1
last_index = 0
for i, (k, v) in enumerate(self.tid_num.items()):
self.tid_start_index[k] = last_index
last_index += v
self.nID = int(last_index+1)
self.nds = [len(x) for x in self.img_files.values()]
self.cds = [sum(self.nds[:i]) for i in range(len(self.nds))]
self.nF = sum(self.nds)
self.width = img_size[0]
self.height = img_size[1]
self.augment = augment
self.transforms = transforms
print('='*80)
print('dataset summary')
print(self.tid_num)
print('total # identities:', self.nID)
print('start index')
print(self.tid_start_index)
print('='*80)
def __getitem__(self, files_index):
for i, c in enumerate(self.cds):
if files_index >= c:
ds = list(self.label_files.keys())[i]
start_index = c
img_path = self.img_files[ds][files_index - start_index]
label_path = self.label_files[ds][files_index - start_index]
imgs, labels, img_path, (h, w) = self.get_data(img_path, label_path)
for i, _ in enumerate(labels):
if labels[i,1] > -1:
labels[i,1] += self.tid_start_index[ds]
return imgs, labels, img_path, (h, w)
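The loaders above plug into a standard PyTorch `DataLoader` through `collate_fn`, which zero-pads each image's label array to the longest one in the batch. A minimal sketch, assuming hypothetical dataset list files and a `ToTensor` transform so the images stack into a batch tensor:
```
import torchvision.transforms as T
from torch.utils.data import DataLoader

# Hypothetical root and per-dataset image-list files.
paths = {'caltech': 'data/caltech.train', 'mot17': 'data/mot17.train'}
dataset = JointDataset(root='/data', paths=paths, img_size=(1088, 608),
                       augment=True, transforms=T.Compose([T.ToTensor()]))
loader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=0,
                    collate_fn=collate_fn)

for imgs, targets, img_paths, sizes, targets_len in loader:
    # imgs:        (B, 3, 608, 1088) float tensor in [0, 1]
    # targets:     (B, max_boxes, 6) zero-padded rows of [class, id, xc, yc, w, h], normalized
    # targets_len: (B, 1) number of valid boxes per image
    break
```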

import os
import numpy as np
import copy
import motmetrics as mm
from utils.io import read_results, unzip_objs
class Evaluator(object):
def __init__(self, data_root, seq_name, data_type):
self.data_root = data_root
self.seq_name = seq_name
self.data_type = data_type
self.load_annotations()
self.reset_accumulator()
def load_annotations(self):
assert self.data_type == 'mot'
gt_filename = os.path.join(self.data_root, self.seq_name, 'gt', 'gt.txt')
self.gt_frame_dict = read_results(gt_filename, self.data_type, is_gt=True)
self.gt_ignore_frame_dict = read_results(gt_filename, self.data_type, is_ignore=True)
def reset_accumulator(self):
self.acc = mm.MOTAccumulator(auto_id=True)
def eval_frame(self, frame_id, trk_tlwhs, trk_ids, rtn_events=False):
# results
trk_tlwhs = np.copy(trk_tlwhs)
trk_ids = np.copy(trk_ids)
# gts
gt_objs = self.gt_frame_dict.get(frame_id, [])
gt_tlwhs, gt_ids = unzip_objs(gt_objs)[:2]
# ignore boxes
ignore_objs = self.gt_ignore_frame_dict.get(frame_id, [])
ignore_tlwhs = unzip_objs(ignore_objs)[0]
# remove ignored results
keep = np.ones(len(trk_tlwhs), dtype=bool)
iou_distance = mm.distances.iou_matrix(ignore_tlwhs, trk_tlwhs, max_iou=0.5)
match_is, match_js = mm.lap.linear_sum_assignment(iou_distance)
match_is, match_js = map(lambda a: np.asarray(a, dtype=int), [match_is, match_js])
match_ious = iou_distance[match_is, match_js]
match_js = np.asarray(match_js, dtype=int)
match_js = match_js[np.logical_not(np.isnan(match_ious))]
keep[match_js] = False
trk_tlwhs = trk_tlwhs[keep]
trk_ids = trk_ids[keep]
# get distance matrix
iou_distance = mm.distances.iou_matrix(gt_tlwhs, trk_tlwhs, max_iou=0.5)
# acc
self.acc.update(gt_ids, trk_ids, iou_distance)
if rtn_events and iou_distance.size > 0 and hasattr(self.acc, 'last_mot_events'):
events = self.acc.last_mot_events # only supported by https://github.com/longcw/py-motmetrics
else:
events = None
return events
def eval_file(self, filename):
self.reset_accumulator()
result_frame_dict = read_results(filename, self.data_type, is_gt=False)
frames = sorted(list(set(self.gt_frame_dict.keys()) | set(result_frame_dict.keys())))
for frame_id in frames:
trk_objs = result_frame_dict.get(frame_id, [])
trk_tlwhs, trk_ids = unzip_objs(trk_objs)[:2]
self.eval_frame(frame_id, trk_tlwhs, trk_ids, rtn_events=False)
return self.acc
@staticmethod
def get_summary(accs, names, metrics=('mota', 'num_switches', 'idp', 'idr', 'idf1', 'precision', 'recall')):
names = copy.deepcopy(names)
if metrics is None:
metrics = mm.metrics.motchallenge_metrics
metrics = copy.deepcopy(metrics)
mh = mm.metrics.create()
summary = mh.compute_many(
accs,
metrics=metrics,
names=names,
generate_overall=True
)
return summary
@staticmethod
def save_summary(summary, filename):
import pandas as pd
writer = pd.ExcelWriter(filename)
summary.to_excel(writer)
writer.save()
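A sketch of how the Evaluator is typically driven, one accumulator per sequence merged into a single summary (the sequence names, ground-truth root, and result paths below are hypothetical):
```
import motmetrics as mm

seqs = ['MOT16-02', 'MOT16-04']
accs, names = [], []
for seq in seqs:
    evaluator = Evaluator(data_root='data/MOT16/train', seq_name=seq, data_type='mot')
    accs.append(evaluator.eval_file('results/{}.txt'.format(seq)))
    names.append(seq)

summary = Evaluator.get_summary(accs, names)
print(mm.io.render_summary(summary,
                           formatters=mm.metrics.create().formatters,
                           namemap=mm.io.motchallenge_metric_names))
```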
import os
from typing import Dict
import numpy as np
from utils.log import logger
def write_results(filename, results_dict: Dict, data_type: str):
if not filename:
return
path = os.path.dirname(filename)
if not os.path.exists(path):
os.makedirs(path)
if data_type in ('mot', 'mcmot', 'lab'):
save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n'
elif data_type == 'kitti':
save_format = '{frame} {id} pedestrian -1 -1 -10 {x1} {y1} {x2} {y2} -1 -1 -1 -1000 -1000 -1000 -10 {score}\n'
else:
raise ValueError(data_type)
with open(filename, 'w') as f:
for frame_id, frame_data in results_dict.items():
if data_type == 'kitti':
frame_id -= 1
for tlwh, track_id in frame_data:
if track_id < 0:
continue
x1, y1, w, h = tlwh
x2, y2 = x1 + w, y1 + h
line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h, score=1.0)
f.write(line)
logger.info('Save results to {}'.format(filename))
def read_results(filename, data_type: str, is_gt=False, is_ignore=False):
if data_type in ('mot', 'lab'):
read_fun = read_mot_results
else:
raise ValueError('Unknown data type: {}'.format(data_type))
return read_fun(filename, is_gt, is_ignore)
"""
labels={'ped', ... % 1
'person_on_vhcl', ... % 2
'car', ... % 3
'bicycle', ... % 4
'mbike', ... % 5
'non_mot_vhcl', ... % 6
'static_person', ... % 7
'distractor', ... % 8
'occluder', ... % 9
'occluder_on_grnd', ... %10
'occluder_full', ... % 11
'reflection', ... % 12
'crowd' ... % 13
};
"""
def read_mot_results(filename, is_gt, is_ignore):
valid_labels = {1}
ignore_labels = {2, 7, 8, 12}
results_dict = dict()
if os.path.isfile(filename):
with open(filename, 'r') as f:
for line in f.readlines():
linelist = line.split(',')
if len(linelist) < 7:
continue
fid = int(linelist[0])
if fid < 1:
continue
results_dict.setdefault(fid, list())
if is_gt:
if 'MOT16-' in filename or 'MOT17-' in filename:
label = int(float(linelist[7]))
mark = int(float(linelist[6]))
if mark == 0 or label not in valid_labels:
continue
score = 1
elif is_ignore:
if 'MOT16-' in filename or 'MOT17-' in filename:
label = int(float(linelist[7]))
vis_ratio = float(linelist[8])
if label not in ignore_labels and vis_ratio >= 0:
continue
else:
continue
score = 1
else:
score = float(linelist[6])
tlwh = tuple(map(float, linelist[2:6]))
target_id = int(linelist[1])
results_dict[fid].append((tlwh, target_id, score))
return results_dict
def unzip_objs(objs):
if len(objs) > 0:
tlwhs, ids, scores = zip(*objs)
else:
tlwhs, ids, scores = [], [], []
tlwhs = np.asarray(tlwhs, dtype=float).reshape(-1, 4)
return tlwhs, ids, scores
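A small round-trip sketch of `write_results` and `read_results`; the frames, boxes, and output path are made up for illustration:
```
# Two frames of hypothetical tracker output: (tlwh, track_id) pairs per frame.
results = {
    1: [((10.0, 20.0, 50.0, 120.0), 1), ((200.0, 40.0, 45.0, 110.0), 2)],
    2: [((12.0, 22.0, 50.0, 120.0), 1)],
}
write_results('results/demo.txt', results, data_type='mot')
# Each line follows the MOT format: frame,id,x1,y1,w,h,1,-1,-1,-1

loaded = read_results('results/demo.txt', 'mot', is_gt=False)
print(loaded[1])  # [((10.0, 20.0, 50.0, 120.0), 1, 1.0), ((200.0, 40.0, 45.0, 110.0), 2, 1.0)]
```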

import logging
def get_logger(name='root'):
formatter = logging.Formatter(
# fmt='%(asctime)s [%(levelname)s]: %(filename)s(%(funcName)s:%(lineno)s) >> %(message)s')
fmt='%(asctime)s [%(levelname)s]: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)
return logger
logger = get_logger('root')

(deleted file, 7 lines)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# from ._utils import _C
from utils import _C
nms = _C.nms
# nms.__doc__ = """
# This function performs Non-maximum suppresion"""
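The wrapper above exposes a compiled `_C.nms(dets, scores, threshold)` call. `torchvision.ops.nms` offers the same interface ((N, 4) xyxy boxes, (N,) scores, a scalar IoU threshold; kept indices returned in descending-score order), so it can serve as a drop-in replacement. A minimal sketch; note that torchvision computes box areas without the legacy `+1` offset used by the kernels below, so borderline overlaps may be resolved slightly differently:
```
import torch
from torchvision.ops import nms

boxes = torch.tensor([[0., 0., 100., 100.],
                      [10., 10., 110., 110.],
                      [200., 200., 300., 300.]])
scores = torch.tensor([0.9, 0.8, 0.7])

keep = nms(boxes, scores, iou_threshold=0.5)  # tensor([0, 2]): box 1 overlaps box 0 above 0.5
kept_boxes = boxes[keep]
```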

utils/nms/nms.h (new file, 32 lines)
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#pragma once
#include <torch/extension.h>
at::Tensor nms_cpu(const at::Tensor& dets, const at::Tensor& scores, const float threshold);
#ifdef WITH_CUDA
at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh);
#endif
at::Tensor nms(const at::Tensor& dets,
const at::Tensor& scores,
const float threshold) {
if (dets.type().is_cuda()) {
#ifdef WITH_CUDA
// TODO raise error if not compiled with CUDA
if (dets.numel() == 0)
return at::empty({0}, dets.options().dtype(at::kLong).device(at::kCPU));
auto b = at::cat({dets, scores.unsqueeze(1)}, 1);
return nms_cuda(b, threshold);
#else
AT_ERROR("Not compiled with GPU support");
#endif
}
at::Tensor result = nms_cpu(dets, scores, threshold);
return result;
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m){
m.def("nms", &nms, "non-maximum suppression");
}

utils/nms/nms_cpu.cpp (new file, 74 lines)
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include "nms.h"
template <typename scalar_t>
at::Tensor nms_cpu_kernel(const at::Tensor& dets,
const at::Tensor& scores,
const float threshold) {
AT_ASSERTM(!dets.type().is_cuda(), "dets must be a CPU tensor");
AT_ASSERTM(!scores.type().is_cuda(), "scores must be a CPU tensor");
AT_ASSERTM(dets.type() == scores.type(), "dets should have the same type as scores");
if (dets.numel() == 0) {
return at::empty({0}, dets.options().dtype(at::kLong).device(at::kCPU));
}
auto x1_t = dets.select(1, 0).contiguous();
auto y1_t = dets.select(1, 1).contiguous();
auto x2_t = dets.select(1, 2).contiguous();
auto y2_t = dets.select(1, 3).contiguous();
at::Tensor areas_t = (x2_t - x1_t + 1) * (y2_t - y1_t + 1);
auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
auto ndets = dets.size(0);
at::Tensor suppressed_t = at::zeros({ndets}, dets.options().dtype(at::kByte).device(at::kCPU));
auto suppressed = suppressed_t.data<uint8_t>();
auto order = order_t.data<int64_t>();
auto x1 = x1_t.data<scalar_t>();
auto y1 = y1_t.data<scalar_t>();
auto x2 = x2_t.data<scalar_t>();
auto y2 = y2_t.data<scalar_t>();
auto areas = areas_t.data<scalar_t>();
for (int64_t _i = 0; _i < ndets; _i++) {
auto i = order[_i];
if (suppressed[i] == 1)
continue;
auto ix1 = x1[i];
auto iy1 = y1[i];
auto ix2 = x2[i];
auto iy2 = y2[i];
auto iarea = areas[i];
for (int64_t _j = _i + 1; _j < ndets; _j++) {
auto j = order[_j];
if (suppressed[j] == 1)
continue;
auto xx1 = std::max(ix1, x1[j]);
auto yy1 = std::max(iy1, y1[j]);
auto xx2 = std::min(ix2, x2[j]);
auto yy2 = std::min(iy2, y2[j]);
auto w = std::max(static_cast<scalar_t>(0), xx2 - xx1 + 1);
auto h = std::max(static_cast<scalar_t>(0), yy2 - yy1 + 1);
auto inter = w * h;
auto ovr = inter / (iarea + areas[j] - inter);
if (ovr >= threshold)
suppressed[j] = 1;
}
}
return at::nonzero(suppressed_t == 0).squeeze(1);
}
at::Tensor nms_cpu(const at::Tensor& dets,
const at::Tensor& scores,
const float threshold) {
at::Tensor result;
AT_DISPATCH_FLOATING_TYPES(dets.type(), "nms", [&] {
result = nms_cpu_kernel<scalar_t>(dets, scores, threshold);
});
return result;
}
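The kernel above is classic greedy NMS. For reference only (this helper is not part of the repository), a NumPy sketch that mirrors its logic, including the `+1` pixel convention:
```
import numpy as np

def nms_cpu_reference(dets, scores, threshold):
    """Greedy NMS over (N, 4) xyxy boxes; returns kept indices in descending-score
    order (the C++ kernel returns them in original index order instead)."""
    x1, y1, x2, y2 = dets[:, 0], dets[:, 1], dets[:, 2], dets[:, 3]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        ovr = inter / (areas[i] + areas[order[1:]] - inter)
        order = order[1:][ovr < threshold]  # suppress boxes with IoU >= threshold
    return np.array(keep, dtype=np.int64)
```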

utils/nms/nms_kernel.cu (new file, 131 lines)
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCDeviceUtils.cuh>
#include <vector>
#include <iostream>
int const threadsPerBlock = sizeof(unsigned long long) * 8;
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = THCCeilDiv(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
// boxes is a N x 5 tensor
at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh) {
using scalar_t = float;
AT_ASSERTM(boxes.type().is_cuda(), "boxes must be a CUDA tensor");
auto scores = boxes.select(1, 4);
auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
auto boxes_sorted = boxes.index_select(0, order_t);
int boxes_num = boxes.size(0);
const int col_blocks = THCCeilDiv(boxes_num, threadsPerBlock);
scalar_t* boxes_dev = boxes_sorted.data<scalar_t>();
THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState
unsigned long long* mask_dev = NULL;
//THCudaCheck(THCudaMalloc(state, (void**) &mask_dev,
// boxes_num * col_blocks * sizeof(unsigned long long)));
mask_dev = (unsigned long long*) THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long));
dim3 blocks(THCCeilDiv(boxes_num, threadsPerBlock),
THCCeilDiv(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
nms_kernel<<<blocks, threads>>>(boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
THCudaCheck(cudaMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
cudaMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
at::Tensor keep = at::empty({boxes_num}, boxes.options().dtype(at::kLong).device(at::kCPU));
int64_t* keep_out = keep.data<int64_t>();
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
THCudaFree(state, mask_dev);
// TODO improve this part
return std::get<0>(order_t.index({
keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep).to(
order_t.device(), keep.scalar_type())
}).sort(0, false));
}

def parse_model_cfg(path):
"""Parses the yolo-v3 layer configuration file and returns module definitions"""
    with open(path, 'r') as f:
        lines = f.read().split('\n')
lines = [x for x in lines if x and not x.startswith('#')]
lines = [x.rstrip().lstrip() for x in lines] # get rid of fringe whitespaces
module_defs = []
for line in lines:
if line.startswith('['): # This marks the start of a new block
module_defs.append({})
module_defs[-1]['type'] = line[1:-1].rstrip()
if module_defs[-1]['type'] == 'convolutional':
module_defs[-1]['batch_normalize'] = 0
else:
key, value = line.split("=")
value = value.strip()
module_defs[-1][key.rstrip()] = value.strip()
return module_defs
def parse_data_cfg(path):
"""Parses the data configuration file"""
options = dict()
options['gpus'] = '0'
options['num_workers'] = '10'
with open(path, 'r') as fp:
lines = fp.readlines()
for line in lines:
line = line.strip()
if line == '' or line.startswith('#'):
continue
key, value = line.split('=')
options[key.strip()] = value.strip()
return options
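To illustrate the returned structure, parsing a tiny made-up cfg fragment with `parse_model_cfg` yields one dict per `[...]` block (the fragment and temp-file handling exist only for this demonstration):
```
import os
import tempfile

cfg_text = """
[net]
width=1088
height=608

[convolutional]
filters=32
size=3
"""
with tempfile.NamedTemporaryFile('w', suffix='.cfg', delete=False) as f:
    f.write(cfg_text)
    cfg_path = f.name

print(parse_model_cfg(cfg_path))
# [{'type': 'net', 'width': '1088', 'height': '608'},
#  {'type': 'convolutional', 'batch_normalize': 0, 'filters': '32', 'size': '3'}]
os.remove(cfg_path)
```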
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import time
class Timer(object):
"""A simple timer."""
def __init__(self):
self.total_time = 0.
self.calls = 0
self.start_time = 0.
self.diff = 0.
self.average_time = 0.
self.duration = 0.
def tic(self):
        # using time.time instead of time.clock because time.clock
        # does not normalize for multithreading
self.start_time = time.time()
def toc(self, average=True):
self.diff = time.time() - self.start_time
self.total_time += self.diff
self.calls += 1
self.average_time = self.total_time / self.calls
if average:
self.duration = self.average_time
else:
self.duration = self.diff
return self.duration
def clear(self):
self.total_time = 0.
self.calls = 0
self.start_time = 0.
self.diff = 0.
self.average_time = 0.
self.duration = 0.
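A trivial usage sketch of the Timer; `do_some_work` is a stand-in for the code being timed:
```
import time

def do_some_work():
    time.sleep(0.01)  # stand-in for the code being timed

timer = Timer()
for _ in range(10):
    timer.tic()
    do_some_work()
    timer.toc()
print('average: {:.4f}s over {} calls'.format(timer.average_time, timer.calls))
```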

(diff for one file suppressed: too large to display)
import numpy as np
import cv2
def tlwhs_to_tlbrs(tlwhs):
tlbrs = np.copy(tlwhs)
if len(tlbrs) == 0:
return tlbrs
tlbrs[:, 2] += tlwhs[:, 0]
tlbrs[:, 3] += tlwhs[:, 1]
return tlbrs
def get_color(idx):
idx = idx * 3
color = ((37 * idx) % 255, (17 * idx) % 255, (29 * idx) % 255)
return color
def resize_image(image, max_size=800):
if max(image.shape[:2]) > max_size:
scale = float(max_size) / max(image.shape[:2])
image = cv2.resize(image, None, fx=scale, fy=scale)
return image
def plot_tracking(image, tlwhs, obj_ids, scores=None, frame_id=0, fps=0., ids2=None):
im = np.ascontiguousarray(np.copy(image))
im_h, im_w = im.shape[:2]
top_view = np.zeros([im_w, im_w, 3], dtype=np.uint8) + 255
text_scale = max(1, image.shape[1] / 1600.)
text_thickness = 1 if text_scale > 1.1 else 1
line_thickness = max(1, int(image.shape[1] / 500.))
radius = max(5, int(im_w/140.))
cv2.putText(im, 'frame: %d fps: %.2f num: %d' % (frame_id, fps, len(tlwhs)),
(0, int(15 * text_scale)), cv2.FONT_HERSHEY_PLAIN, text_scale, (0, 0, 255), thickness=2)
for i, tlwh in enumerate(tlwhs):
x1, y1, w, h = tlwh
intbox = tuple(map(int, (x1, y1, x1 + w, y1 + h)))
obj_id = int(obj_ids[i])
id_text = '{}'.format(int(obj_id))
if ids2 is not None:
id_text = id_text + ', {}'.format(int(ids2[i]))
_line_thickness = 1 if obj_id <= 0 else line_thickness
color = get_color(abs(obj_id))
cv2.rectangle(im, intbox[0:2], intbox[2:4], color=color, thickness=line_thickness)
cv2.putText(im, id_text, (intbox[0], intbox[1] + 30), cv2.FONT_HERSHEY_PLAIN, text_scale, (0, 0, 255),
thickness=text_thickness)
return im
def plot_trajectory(image, tlwhs, track_ids):
image = image.copy()
for one_tlwhs, track_id in zip(tlwhs, track_ids):
color = get_color(int(track_id))
for tlwh in one_tlwhs:
x1, y1, w, h = tuple(map(int, tlwh))
cv2.circle(image, (int(x1 + 0.5 * w), int(y1 + h)), 2, color, thickness=2)
return image
def plot_detections(image, tlbrs, scores=None, color=(255, 0, 0), ids=None):
im = np.copy(image)
text_scale = max(1, image.shape[1] / 800.)
thickness = 2 if text_scale > 1.3 else 1
for i, det in enumerate(tlbrs):
        x1, y1, x2, y2 = np.asarray(det[:4], dtype=int)
if len(det) >= 7:
label = 'det' if det[5] > 0 else 'trk'
if ids is not None:
text = '{}# {:.2f}: {:d}'.format(label, det[6], ids[i])
cv2.putText(im, text, (x1, y1 + 30), cv2.FONT_HERSHEY_PLAIN, text_scale, (0, 255, 255),
thickness=thickness)
else:
text = '{}# {:.2f}'.format(label, det[6])
if scores is not None:
text = '{:.2f}'.format(scores[i])
cv2.putText(im, text, (x1, y1 + 30), cv2.FONT_HERSHEY_PLAIN, text_scale, (0, 255, 255),
thickness=thickness)
cv2.rectangle(im, (x1, y1), (x2, y2), color, 2)
return im
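A short sketch of producing a per-frame tracking overlay with `plot_tracking`; the frame, boxes, and ids below are synthetic:
```
import cv2
import numpy as np

frame = np.full((608, 1088, 3), 114, dtype=np.uint8)       # synthetic gray frame
online_tlwhs = [(100, 150, 60, 160), (400, 200, 55, 150)]  # (x1, y1, w, h) boxes
online_ids = [1, 2]

online_im = plot_tracking(frame, online_tlwhs, online_ids, frame_id=0, fps=25.0)
cv2.imwrite('frame_0000.jpg', online_im)
```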