fix a bug in track.py
add syncbn; update README.md
parent a830e47ecd
commit a4b81205cf

5 changed files with 16 additions and 5 deletions
README.md (14 changes)

@@ -1,8 +1,18 @@
# Towards-Realtime-MOT
**NOTE:** Still a work in progress; this repo will be updated constantly. Thank you for your attention!
## Introduction
This repo is the codebase of the Joint Detection and Embedding (JDE) model. JDE is a fast and high-performance multiple-object tracker that learns the object detection task and the appearance embedding task simultaneously in a shared neural network. Technical details are described in our [arXiv preprint paper](https://arxiv.org). Using this repo, you can simply achieve **MOTA 64%+** on the "private" protocol of the [MOT-16 challenge](https://motchallenge.net/tracker/JDE) at a near real-time speed of **18~24 FPS** (note that this speed is for the entire system, including the detection step!).
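As a rough illustration of "detection and embedding in a shared network": one backbone feature map feeds both a detection head and an appearance-embedding head, so tracking does not require a second network. The sketch below is a toy example with made-up layer and channel sizes, not the repo's actual prediction head.

```python
import torch.nn as nn

class JointHead(nn.Module):
    """Toy sketch of the JDE idea: a shared feature map feeds both a
    detection head and an appearance-embedding head. All sizes here
    are illustrative, not the repo's real configuration."""

    def __init__(self, in_ch=256, num_anchors=4, emb_dim=512):
        super().__init__()
        # box (4) + objectness (1) per anchor
        self.det = nn.Conv2d(in_ch, num_anchors * 5, kernel_size=3, padding=1)
        # one embedding vector per spatial location
        self.emb = nn.Conv2d(in_ch, emb_dim, kernel_size=3, padding=1)

    def forward(self, feat):
        return self.det(feat), self.emb(feat)
```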
## Installation
We hope this repo will help researchers/engineers to develop more practical MOT systems. For algorithm development, we provide training data, baseline models and evaluation methods to make a level playing field. For application usage, we also provide a small video demo that takes raw videos as input without any bells and whistles.
## Requirements
* Python 3.6
* [PyTorch](https://pytorch.org) >= 1.0.1
* [syncbn](https://github.com/ytoon/Synchronized-BatchNorm-PyTorch) (Optional; compile it and place it under utils/syncbn, or simply replace it with nn.BatchNorm [here](https://github.com/Zhongdao/Towards-Realtime-MOT/blob/master/models.py#L12); see the sketch after this list)
* [maskrcnn-benchmark](https://github.com/facebookresearch/maskrcnn-benchmark) (Their GPU NMS is used in this project)
* python-opencv
* ffmpeg (Optional, used in the video demo)
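For the optional syncbn swap mentioned above, a minimal sketch of a guarded import is shown below. It assumes the compiled package under utils/syncbn exposes a `SyncBN` module class; that import path and name are an assumption, and the real switch lives in models.py at the line linked above.

```python
import torch.nn as nn

try:
    # Compiled Synchronized-BatchNorm extension placed under utils/syncbn
    # (the import path/name is an assumption; adjust to what you compiled).
    from utils.syncbn import SyncBN
    batch_norm = SyncBN
except ImportError:
    # Without the extension, fall back to plain per-GPU BatchNorm.
    batch_norm = nn.BatchNorm2d
```

Synchronized BatchNorm mainly matters for multi-GPU training, where per-GPU batches are otherwise too small to give stable normalization statistics.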
## Video Demo
@@ -16,3 +26,5 @@
## Train with custom datasets
## Acknowledgement
A large portion of the code is borrowed from [ultralytics/yolov3](https://github.com/ultralytics/yolov3) and [longcw/MOTDT](https://github.com/longcw/MOTDT). Many thanks for their wonderful work!
models.py

@@ -185,7 +185,7 @@ class YOLOLayer(nn.Module):
#loss = torch.exp(-self.s_r)*lbox + torch.exp(-self.s_c)*lconf + torch.exp(-self.s_id)*lid + \
#       (self.s_r + self.s_c + self.s_id)
#loss *= 0.5
- loss = 1*lbox + 1*lconf + 1*lid
+ loss = 0*lbox + 0*lconf + 1*lid

return loss, loss.item(), lbox.item(), lconf.item(), lid.item(), nT
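For context on the hunk above: the commented-out lines match the homoscedastic task-uncertainty weighting of Kendall et al., with `s_r`, `s_c`, `s_id` acting as learnable log-variance parameters, while the live change zeroes the box and confidence weights so that only the embedding loss is trained. Below is a minimal sketch of that weighting, assuming the parameters are registered on the layer as the comments imply; the module name and wiring here are hypothetical.

```python
import torch
import torch.nn as nn

class UncertaintyWeightedLoss(nn.Module):
    """Sketch of the commented-out weighting: exp(-s) * L down-weights
    tasks with high uncertainty, and the + s term keeps each s from
    drifting to -inf. Parameter names follow the diff above."""

    def __init__(self):
        super().__init__()
        self.s_r = nn.Parameter(torch.zeros(1))   # box-regression log-variance
        self.s_c = nn.Parameter(torch.zeros(1))   # confidence log-variance
        self.s_id = nn.Parameter(torch.zeros(1))  # embedding log-variance

    def forward(self, lbox, lconf, lid):
        loss = (torch.exp(-self.s_r) * lbox
                + torch.exp(-self.s_c) * lconf
                + torch.exp(-self.s_id) * lid
                + (self.s_r + self.s_c + self.s_id))
        return 0.5 * loss  # mirrors the commented `loss *= 0.5`
```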
syncbn (1 change)

@@ -1 +0,0 @@
- Subproject commit 265a7059ebbd20c27a81c3d74d43773779fe70d7
test.py (1 change)

@@ -55,7 +55,6 @@ def test(
    outputs, mAPs, mR, mP, TP, confidence, pred_class, target_class, jdict = \
        [], [], [], [], [], [], [], [], []
    AP_accum, AP_accum_count = np.zeros(nC), np.zeros(nC)
    coco91class = coco80_to_coco91_class()
    for batch_i, (imgs, targets, paths, shapes, targets_len) in enumerate(dataloader):
        t = time.time()
        output = model(imgs.cuda())
track.py (1 change)

@@ -38,6 +38,7 @@ def write_results(filename, results, data_type):
def eval_seq(opt, dataloader, data_type, result_filename, save_dir=None, show_image=True, frame_rate=30):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
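The `frame_rate` argument is worth plumbing through because `JDETracker` uses it to scale how long lost tracks are buffered before being discarded. Below is a hypothetical caller sketch that probes the source FPS with OpenCV; the `'mot'` data type and the helper name are illustrative assumptions, not part of this diff.

```python
import cv2

def track_video(opt, dataloader, result_filename, video_path):
    # Probe the source FPS so the tracker's lost-track buffer matches
    # the video; fall back to the 30 FPS default if probing fails.
    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS) or 30
    cap.release()
    return eval_seq(opt, dataloader, 'mot', result_filename,
                    save_dir=None, show_image=False, frame_rate=int(fps))
```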