WIP: update linear assignment.
- Fix for the DeprecationWarning raised by sklearn's linear_assignment_ module.
- LAP scales better than SciPy for frames with 100s-1000s of objects.
parent 54e63a7e43
commit b69f2fc279
1 changed file with 77 additions and 56 deletions
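
For reference, a minimal sketch (not part of this commit) of how the lap.lapjv call introduced below maps onto the older assignment interfaces. The wrapper name linear_assignment is illustrative only, and the SciPy fallback is an assumption rather than something this diff adds:

import numpy as np

def linear_assignment(cost_matrix):
  # Return matched (row, col) index pairs for a cost matrix, mirroring the
  # lap.lapjv usage in the diff below.
  try:
    import lap
    # extend_cost=True lets lapjv handle rectangular (non-square) matrices.
    _, x, y = lap.lapjv(cost_matrix, extend_cost=True)
    # x[r] is the column assigned to row r; unassigned rows have x[r] < 0.
    return np.array([[y[i], i] for i in x if i >= 0])
  except ImportError:
    # Assumed fallback: SciPy's linear_sum_assignment replaces the deprecated
    # sklearn.utils.linear_assignment_ module.
    from scipy.optimize import linear_sum_assignment
    rows, cols = linear_sum_assignment(cost_matrix)
    return np.array(list(zip(rows, cols)))

# As in the diff, IOU is maximised by minimising its negation, e.g.:
#   matched_indices = linear_assignment(-iou_matrix)
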
sort.py
@@ -18,19 +18,24 @@
 from __future__ import print_function
 
 from numba import jit
-import os.path
+import os.path as osp
 import numpy as np
+import matplotlib
+matplotlib.use('TkAgg')
 import matplotlib.pyplot as plt
 import matplotlib.patches as patches
 from skimage import io
-from sklearn.utils.linear_assignment_ import linear_assignment
+#from sklearn.utils.linear_assignment_ import linear_assignment
+import lap
 import glob
 import time
 import argparse
 from filterpy.kalman import KalmanFilter
 
+np.random.seed(0)
+
 @jit
-def iou(bb_test,bb_gt):
+def iou(bb_test, bb_gt):
   """
   Computes IUO between two bboxes in the form [x1,y1,x2,y2]
   """
@@ -41,8 +46,8 @@ def iou(bb_test,bb_gt):
   w = np.maximum(0., xx2 - xx1)
   h = np.maximum(0., yy2 - yy1)
   wh = w * h
-  o = wh / ((bb_test[2]-bb_test[0])*(bb_test[3]-bb_test[1])
-    + (bb_gt[2]-bb_gt[0])*(bb_gt[3]-bb_gt[1]) - wh)
+  o = wh / ((bb_test[2] - bb_test[0]) * (bb_test[3] - bb_test[1])
+    + (bb_gt[2] - bb_gt[0]) * (bb_gt[3] - bb_gt[1]) - wh)
   return(o)
 
 def convert_bbox_to_z(bbox):
@@ -51,21 +56,21 @@ def convert_bbox_to_z(bbox):
     [x,y,s,r] where x,y is the centre of the box and s is the scale/area and r is
     the aspect ratio
   """
-  w = bbox[2]-bbox[0]
-  h = bbox[3]-bbox[1]
-  x = bbox[0]+w/2.
-  y = bbox[1]+h/2.
-  s = w*h #scale is just area
-  r = w/float(h)
-  return np.array([x,y,s,r]).reshape((4,1))
+  w = bbox[2] - bbox[0]
+  h = bbox[3] - bbox[1]
+  x = bbox[0] + w/2.
+  y = bbox[1] + h/2.
+  s = w * h #scale is just area
+  r = w / float(h)
+  return np.array([x, y, s, r]).reshape((4, 1))
 
 def convert_x_to_bbox(x,score=None):
   """
   Takes a bounding box in the centre form [x,y,s,r] and returns it in the form
     [x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right
   """
-  w = np.sqrt(x[2]*x[3])
-  h = x[2]/w
+  w = np.sqrt(x[2] * x[3])
+  h = x[2] / w
   if(score==None):
     return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.]).reshape((1,4))
   else:
@@ -74,7 +79,7 @@ def convert_x_to_bbox(x,score=None):
 
 class KalmanBoxTracker(object):
   """
-  This class represents the internel state of individual tracked objects observed as bbox.
+  This class represents the internal state of individual tracked objects observed as bbox.
   """
   count = 0
   def __init__(self,bbox):
@@ -144,21 +149,34 @@ def associate_detections_to_trackers(detections,trackers,iou_threshold = 0.3):
   for d,det in enumerate(detections):
     for t,trk in enumerate(trackers):
       iou_matrix[d,t] = iou(det,trk)
-  matched_indices = linear_assignment(-iou_matrix)
+
+  # TODO (bewley): remove rows and cols iou.max() < threshold
+
+  if min(iou_matrix.shape) > 0:
+    a = (iou_matrix > iou_threshold).astype(np.int32)
+    if a.sum(1).max() == 1 and a.sum(0).max() == 1:
+      #matrix is #TODO(this doesnt provide much gains)
+      matched_indices = np.stack(np.where(a), axis=1)
+    else:
+      _, x, y = lap.lapjv(-iou_matrix, extend_cost=True)
+      matched_indices = np.array([[y[i],i] for i in x if i >= 0]) #
+  else:
+    matched_indices = np.empty(shape=(0,2))
+  #matched_indices = linear_assignment(-iou_matrix)
 
   unmatched_detections = []
-  for d,det in enumerate(detections):
+  for d, det in enumerate(detections):
     if(d not in matched_indices[:,0]):
       unmatched_detections.append(d)
   unmatched_trackers = []
-  for t,trk in enumerate(trackers):
+  for t, trk in enumerate(trackers):
     if(t not in matched_indices[:,1]):
       unmatched_trackers.append(t)
 
   #filter out matched with low IOU
   matches = []
   for m in matched_indices:
-    if(iou_matrix[m[0],m[1]]<iou_threshold):
+    if(iou_matrix[m[0], m[1]]<iou_threshold):
       unmatched_detections.append(m[0])
       unmatched_trackers.append(m[1])
     else:
@@ -171,9 +189,8 @@ def associate_detections_to_trackers(detections,trackers,iou_threshold = 0.3):
   return matches, np.array(unmatched_detections), np.array(unmatched_trackers)
 
 
-
 class Sort(object):
-  def __init__(self,max_age=1,min_hits=3):
+  def __init__(self, max_age=1, min_hits=3):
     """
     Sets key parameters for SORT
     """
@@ -182,96 +199,102 @@ class Sort(object):
     self.trackers = []
     self.frame_count = 0
 
-  def update(self,dets):
+  def update(self, dets=np.empty((0, 5))):
     """
     Params:
       dets - a numpy array of detections in the format [[x1,y1,x2,y2,score],[x1,y1,x2,y2,score],...]
-    Requires: this method must be called once for each frame even with empty detections.
+    Requires: this method must be called once for each frame even with empty detections (use np.empty((0, 5)) for frames without detections).
     Returns the a similar array, where the last column is the object ID.
 
     NOTE: The number of objects returned may differ from the number of detections provided.
     """
     self.frame_count += 1
-    #get predicted locations from existing trackers.
-    trks = np.zeros((len(self.trackers),5))
+    # get predicted locations from existing trackers.
+    trks = np.zeros((len(self.trackers), 5))
     to_del = []
     ret = []
-    for t,trk in enumerate(trks):
+    for t, trk in enumerate(trks):
       pos = self.trackers[t].predict()[0]
       trk[:] = [pos[0], pos[1], pos[2], pos[3], 0]
-      if(np.any(np.isnan(pos))):
+      if np.any(np.isnan(pos)):
        to_del.append(t)
     trks = np.ma.compress_rows(np.ma.masked_invalid(trks))
     for t in reversed(to_del):
       self.trackers.pop(t)
     matched, unmatched_dets, unmatched_trks = associate_detections_to_trackers(dets,trks)
 
-    #update matched trackers with assigned detections
-    for t,trk in enumerate(self.trackers):
-      if(t not in unmatched_trks):
-        d = matched[np.where(matched[:,1]==t)[0],0]
-        trk.update(dets[d,:][0])
+    # update matched trackers with assigned detections
+    for m in matched:
+      self.trackers[m[1]].update(dets[m[0], :])
+    #for t, trk in enumerate(self.trackers):
+    #  if(t not in unmatched_trks):
+    #    d = matched[np.where(matched[:,1]==t)[0],0]
+    #    trk.update(dets[d,:][0])
 
-    #create and initialise new trackers for unmatched detections
+    # create and initialise new trackers for unmatched detections
     for i in unmatched_dets:
       trk = KalmanBoxTracker(dets[i,:])
       self.trackers.append(trk)
     i = len(self.trackers)
     for trk in reversed(self.trackers):
       d = trk.get_state()[0]
-      if((trk.time_since_update < 1) and (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits)):
+      if (trk.time_since_update < 1) and (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits):
        ret.append(np.concatenate((d,[trk.id+1])).reshape(1,-1)) # +1 as MOT benchmark requires positive
       i -= 1
-      #remove dead tracklet
+      # remove dead tracklet
       if(trk.time_since_update > self.max_age):
         self.trackers.pop(i)
     if(len(ret)>0):
       return np.concatenate(ret)
     return np.empty((0,5))
 
 
 def parse_args():
   """Parse input arguments."""
   parser = argparse.ArgumentParser(description='SORT demo')
   parser.add_argument('--display', dest='display', help='Display online tracker output (slow) [False]',action='store_true')
+  parser.add_argument("--seq_path", help="Path to detections.", type=str, default='mot_benchmark')
+  parser.add_argument("--phase", help="Subdirectory in seq_path.", type=str, default='train')
   args = parser.parse_args()
   return args
 
 if __name__ == '__main__':
   # all train
-  sequences = ['PETS09-S2L1','TUD-Campus','TUD-Stadtmitte','ETH-Bahnhof','ETH-Sunnyday','ETH-Pedcross2','KITTI-13','KITTI-17','ADL-Rundle-6','ADL-Rundle-8','Venice-2']
   args = parse_args()
   display = args.display
-  phase = 'train'
+  phase = args.phase
   total_time = 0.0
   total_frames = 0
-  colours = np.random.rand(32,3) #used only for display
+  colours = np.random.rand(32, 3) #used only for display
   if(display):
-    if not os.path.exists('mot_benchmark'):
+    if not osp.exists('mot_benchmark'):
       print('\n\tERROR: mot_benchmark link not found!\n\n Create a symbolic link to the MOT benchmark\n (https://motchallenge.net/data/2D_MOT_2015/#download). E.g.:\n\n $ ln -s /path/to/MOT2015_challenge/2DMOT2015 mot_benchmark\n\n')
       exit()
     plt.ion()
     fig = plt.figure()
 
-  if not os.path.exists('output'):
+  if not osp.exists('output'):
     os.makedirs('output')
 
-  for seq in sequences:
+  pattern = osp.join(args.seq_path, phase, '*', 'det', 'det.txt')
+  for seq_dets_fn in glob.glob(pattern):
     mot_tracker = Sort() #create instance of the SORT tracker
-    seq_dets = np.loadtxt('data/%s/det.txt'%(seq),delimiter=',') #load detections
+    print(seq_dets_fn)
+    seq_dets = np.loadtxt(seq_dets_fn, delimiter=',')
+    seq = seq_dets_fn[pattern.find('*'):].split('/')[0]
 
     with open('output/%s.txt'%(seq),'w') as out_file:
       print("Processing %s."%(seq))
       for frame in range(int(seq_dets[:,0].max())):
         frame += 1 #detection and frame numbers begin at 1
-        dets = seq_dets[seq_dets[:,0]==frame,2:7]
-        dets[:,2:4] += dets[:,0:2] #convert to [x1,y1,w,h] to [x1,y1,x2,y2]
+        dets = seq_dets[seq_dets[:, 0]==frame, 2:7]
+        dets[:, 2:4] += dets[:, 0:2] #convert to [x1,y1,w,h] to [x1,y1,x2,y2]
         total_frames += 1
 
         if(display):
           ax1 = fig.add_subplot(111, aspect='equal')
-          fn = 'mot_benchmark/%s/%s/img1/%06d.jpg'%(phase,seq,frame)
+          fn = 'mot_benchmark/%s/%s/img1/%06d.jpg'%(phase, seq, frame)
           im =io.imread(fn)
           ax1.imshow(im)
-          plt.title(seq+' Tracked Targets')
+          plt.title(seq + ' Tracked Targets')
 
         start_time = time.time()
         trackers = mot_tracker.update(dets)
@@ -283,16 +306,14 @@ if __name__ == '__main__':
           if(display):
             d = d.astype(np.int32)
             ax1.add_patch(patches.Rectangle((d[0],d[1]),d[2]-d[0],d[3]-d[1],fill=False,lw=3,ec=colours[d[4]%32,:]))
-            ax1.set_adjustable('box-forced')
+            #ax1.set_adjustable('box-forced')
 
         if(display):
           fig.canvas.flush_events()
           plt.draw()
           ax1.cla()
 
-  print("Total Tracking took: %.3f for %d frames or %.1f FPS"%(total_time,total_frames,total_frames/total_time))
+  print("Total Tracking took: %.3f for %d frames or %.1f FPS" % (total_time, total_frames, total_frames / total_time))
 
-
   if(display):
     print("Note: to get real runtime results run without the option: --display")
-
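
For context, a minimal usage sketch of the updated Sort.update() contract: it must be called once per frame, with np.empty((0, 5)) standing in for frames without detections. The two toy frames below are made-up data for illustration, not part of the repository:

import numpy as np
from sort import Sort

mot_tracker = Sort(max_age=1, min_hits=3)
# One frame with a single [x1, y1, x2, y2, score] detection, one empty frame.
frames = [
  np.array([[10., 20., 50., 80., 0.9]]),
  np.empty((0, 5)),
]
for dets in frames:
  tracks = mot_tracker.update(dets)  # must be called every frame, even when empty
  # each row of tracks is [x1, y1, x2, y2, track_id]
  print(tracks)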