Added docstring. (#92)

Falak authored 2020-02-17 12:38:45 +05:30, committed by GitHub
parent f03efb2478
commit a23837c39c

demo.py (36 changed lines)

@@ -1,3 +1,29 @@
"""Demo file for running the JDE tracker on custom video sequences for pedestrian tracking.
This file is the entry point to running the tracker on custom video sequences. It loads images from the provided video sequence, uses the JDE tracker for inference and outputs the video with bounding boxes indicating pedestrians. The bounding boxes also have associated ids (shown in different colours) to keep track of the movement of each individual.
Examples:
    $ python demo.py --input-video path/to/your/input/video --weights path/to/model/weights --output-root path/to/output/root

Attributes:
    input-video (str): Path to the input video for tracking.
    output-root (str): Output root path. default='results'
    weights (str): Path from which to load the model weights. default='weights/latest.pt'
    cfg (str): Path to the cfg file describing the model. default='cfg/yolov3.cfg'
    iou-thres (float): IOU threshold for an object to be classified as detected. default=0.5
    conf-thres (float): Confidence threshold for a detection to be classified as an object. default=0.5
    nms-thres (float): IOU threshold for performing non-max suppression. default=0.4
    min-box-area (float): Filter out boxes smaller than this area from detections. default=200
    track-buffer (int): Size of the tracking buffer. default=30
    output-format (str): Expected output format, can be video or text. default='video'

Todo:
    * Add compatibility for non-GPU machines (would run slow)
    * More documentation
"""
import os
import os.path as osp
import cv2
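The attributes documented in the new docstring mirror the argparse flags defined at the bottom of demo.py, so the same options can also be assembled programmatically and handed to track(). A minimal sketch of that, assuming demo.py is importable from the working directory; the paths are hypothetical and the img_size value is an assumed default for the --img-size flag, which is not shown in this diff:

import argparse
from demo import track

# Hypothetical option set; argparse turns --iou-thres into opt.iou_thres and so on,
# so the field names here follow the flags listed in the docstring.
opt = argparse.Namespace(
    cfg='cfg/yolov3.cfg', weights='weights/latest.pt',
    iou_thres=0.5, conf_thres=0.5, nms_thres=0.4,
    min_box_area=200, track_buffer=30, img_size=(1088, 608),
    input_video='videos/example.mp4', output_format='video',
    output_root='results')
track(opt)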
@@ -18,8 +44,9 @@ import torch
from track import eval_seq
-def track(opt):
-    logger.setLevel(logging.INFO)
+logger.setLevel(logging.INFO)
+def track(opt):
    result_root = opt.output_root if opt.output_root!='' else '.'
    mkdir_if_missing(result_root)
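mkdir_if_missing comes from the project's utility code and is not part of this diff; judging by its name, it simply creates result_root when it does not already exist. A rough sketch of that assumed behaviour (not the project's actual implementation):

import os

def mkdir_if_missing(path):
    # Create the directory tree only if it is absent, so repeated runs
    # writing to the same --output-root do not fail.
    if not os.path.exists(path):
        os.makedirs(path)

mkdir_if_missing('results')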
@@ -31,7 +58,7 @@ def track(opt):
    accs = []
    n_frame = 0
-    logger.info('start tracking...')
+    logger.info('Starting tracking...')
    dataloader = datasets.LoadVideo(opt.input_video, opt.img_size)
    result_filename = os.path.join(result_root, 'results.txt')
    frame_rate = dataloader.frame_rate
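The part of track() elided between this hunk and the next is where eval_seq (imported from track.py above) consumes the dataloader and writes per-frame tracking results. That call is not shown in this diff; the lines below are only a hedged sketch of how the objects set up here might be passed in, with the 'mot' data type, the keyword names and the frame directory all being assumptions (the frame path is inferred from the ffmpeg command in the next hunk):

frame_dir = None if opt.output_format == 'text' else osp.join(result_root, 'frame')
eval_seq(opt, dataloader, 'mot', result_filename,
         save_dir=frame_dir, show_image=False, frame_rate=frame_rate)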
@@ -48,6 +75,7 @@ def track(opt):
    cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(osp.join(result_root, 'frame'), output_video_path)
    os.system(cmd_str)
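With the default --output-root of 'results', the format string above expands into a single ffmpeg call that stitches the dumped frames back into a video. output_video_path is defined just before this hunk, so the concrete output path below is only illustrative:

example_cmd = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(
    osp.join('results', 'frame'), 'results/result.mp4')
# example_cmd == 'ffmpeg -f image2 -i results/frame/%05d.jpg -c:v copy results/result.mp4'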
if __name__ == '__main__':
    parser = argparse.ArgumentParser(prog='demo.py')
    parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')
@@ -58,7 +86,7 @@ if __name__ == '__main__':
    parser.add_argument('--min-box-area', type=float, default=200, help='filter out tiny boxes')
    parser.add_argument('--track-buffer', type=int, default=30, help='tracking buffer')
    parser.add_argument('--input-video', type=str, help='path to the input video')
-    parser.add_argument('--output-format', type=str, default='video', help='expected output format, can be video, or text')
+    parser.add_argument('--output-format', type=str, default='video', choices=['video', 'text'], help='Expected output format. Video or text.')
    parser.add_argument('--output-root', type=str, default='results', help='expected output root path')
    opt = parser.parse_args()
    print(opt, end='\n\n')
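The practical effect of adding choices=['video', 'text'] is that an unsupported format now fails at parse time rather than propagating into track(). A small self-contained check of that behaviour:

import argparse

parser = argparse.ArgumentParser(prog='demo.py')
parser.add_argument('--output-format', type=str, default='video',
                    choices=['video', 'text'])
parser.parse_args(['--output-format', 'gif'])
# Exits with: demo.py: error: argument --output-format: invalid choice: 'gif'
#             (choose from 'video', 'text')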