From a4b81205cfb2b15dccc9282fb97731f902ffa9f3 Mon Sep 17 00:00:00 2001
From: lynn
Date: Sun, 29 Sep 2019 13:18:38 +0800
Subject: [PATCH] fix a bug in track.py; add syncbn; update README.md

---
 README.md | 14 +++++++++++++-
 models.py |  2 +-
 syncbn    |  1 -
 test.py   |  1 -
 track.py  |  3 ++-
 5 files changed, 16 insertions(+), 5 deletions(-)
 delete mode 160000 syncbn

diff --git a/README.md b/README.md
index 34b6df4..fe340c7 100644
--- a/README.md
+++ b/README.md
@@ -1,8 +1,18 @@
 # Towards-Realtime-MOT
+**NOTE:** Still in progress; this repo will be updated constantly. Thank you for your attention!

 ## Introduction
+This repo is the codebase of the Joint Detection and Embedding (JDE) model. JDE is a fast and high-performance multiple-object tracker that learns the object detection task and the appearance embedding task simultaneously in a shared neural network. Technical details are described in our [arXiv preprint paper](https://arxiv.org). Using this repo, you can achieve **MOTA 64%+** on the "private" protocol of the [MOT-16 challenge](https://motchallenge.net/tracker/JDE) at a near real-time speed of **18~24 FPS** (note that this speed is for the entire system, including the detection step).
-## Installation
+We hope this repo helps researchers and engineers develop more practical MOT systems. For algorithm development, we provide training data, baseline models and evaluation methods to offer a level playing field. For application usage, we also provide a small video demo that takes raw videos as input without any bells and whistles.
+
+## Requirements
+* Python 3.6
+* [Pytorch](https://pytorch.org) >= 1.0.1
+* [syncbn](https://github.com/ytoon/Synchronized-BatchNorm-PyTorch) (Optional; compile and place it under utils/syncbn, or simply replace it with nn.BatchNorm [here](https://github.com/Zhongdao/Towards-Realtime-MOT/blob/master/models.py#L12))
+* [maskrcnn-benchmark](https://github.com/facebookresearch/maskrcnn-benchmark) (Their GPU NMS is used in this project)
+* python-opencv
+* ffmpeg (Optional; used in the video demo)

 ## Video Demo

@@ -16,3 +26,5 @@

 ## Train with custom datasets

+## Acknowledgement
+A large portion of the code is borrowed from [ultralytics/yolov3](https://github.com/ultralytics/yolov3) and [longcw/MOTDT](https://github.com/longcw/MOTDT); many thanks for their wonderful work!
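Editor's note: the Requirements entry above says syncbn is optional and can be replaced with nn.BatchNorm. The sketch below only illustrates that fallback; it is not the repo's actual models.py code, and the `SynchronizedBatchNorm2d` import path is an assumption that may differ from how the compiled extension is laid out under utils/syncbn.

```python
import torch.nn as nn

# Minimal sketch of the optional fallback mentioned in the Requirements list.
# Assumption: the compiled extension under utils/syncbn exposes a
# BatchNorm2d-compatible class; the exact module/class names may differ.
try:
    from utils.syncbn import SynchronizedBatchNorm2d as BatchNorm2d  # multi-GPU synchronized BN
except ImportError:
    BatchNorm2d = nn.BatchNorm2d  # plain BN is sufficient for single-GPU training

def norm_layer(num_channels, momentum=0.1):
    """Build a batch-norm layer, synchronized across GPUs when syncbn is available."""
    return BatchNorm2d(num_channels, momentum=momentum)
```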
diff --git a/models.py b/models.py
index ffb622f..d783860 100644
--- a/models.py
+++ b/models.py
@@ -185,7 +185,7 @@ class YOLOLayer(nn.Module):
         #loss = torch.exp(-self.s_r)*lbox + torch.exp(-self.s_c)*lconf + torch.exp(-self.s_id)*lid + \
         #       (self.s_r + self.s_c + self.s_id)
         #loss *= 0.5
-        loss = 1*lbox + 1*lconf + 1*lid
+        loss = 0*lbox + 0*lconf + 1*lid

         return loss, loss.item(), lbox.item(), lconf.item(), lid.item(), nT
diff --git a/syncbn b/syncbn
deleted file mode 160000
index 265a705..0000000
--- a/syncbn
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 265a7059ebbd20c27a81c3d74d43773779fe70d7
diff --git a/test.py b/test.py
index e8f9d0c..409a8ab 100644
--- a/test.py
+++ b/test.py
@@ -55,7 +55,6 @@ def test(
     outputs, mAPs, mR, mP, TP, confidence, pred_class, target_class, jdict = \
         [], [], [], [], [], [], [], [], []
     AP_accum, AP_accum_count = np.zeros(nC), np.zeros(nC)
-    coco91class = coco80_to_coco91_class()
     for batch_i, (imgs, targets, paths, shapes, targets_len) in enumerate(dataloader):
         t = time.time()
         output = model(imgs.cuda())
diff --git a/track.py b/track.py
index 02a2a95..191f9d5 100644
--- a/track.py
+++ b/track.py
@@ -38,7 +38,8 @@ def write_results(filename, results, data_type):

 def eval_seq(opt, dataloader, data_type, result_filename, save_dir=None, show_image=True, frame_rate=30):
-    mkdir_if_missing(save_dir)
+    if save_dir:
+        mkdir_if_missing(save_dir)
     tracker = JDETracker(opt, frame_rate=frame_rate)
     timer = Timer()
     results = []
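Editor's note: the track.py hunk above makes save_dir genuinely optional. The snippet below is only an illustration of that behaviour, not part of the patch; `opt` and `dataloader` are assumed to come from the repo's own option parsing and dataset utilities and are not constructed here, and `data_type='mot'` is assumed to match the MOT-format branch of write_results.

```python
from track import eval_seq

# Illustrative call: with the `if save_dir:` guard added above, passing
# save_dir=None no longer reaches mkdir_if_missing, so no output directory
# is created and no annotated frames are written; only result_filename is produced.
eval_seq(opt, dataloader, data_type='mot',
         result_filename='results.txt',
         save_dir=None,        # skip saving annotated frames
         show_image=False,
         frame_rate=30)
```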