enable smoother
This commit is contained in:
parent c9f573fcdd
commit af2c943673

5 changed files with 28 additions and 14 deletions
@@ -195,6 +195,10 @@ frame_emitter_parser.add_argument("--video-src",
                                   type=Path,
                                   nargs='+',
                                   default=lambda: list(Path('../DATASETS/VIRAT_subset_0102x/').glob('*.mp4')))
+frame_emitter_parser.add_argument("--video-offset",
+                                  help="Start playback from given frame. Note that when src is an array, this applies to all videos individually.",
+                                  default=None,
+                                  type=int)
 #TODO: camera as source

 frame_emitter_parser.add_argument("--video-loop",
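A minimal sketch of how the new flag parses, for reference. This is standard argparse behavior; the parser name and the sample value are illustrative, not from the commit:

    from argparse import ArgumentParser
    from pathlib import Path

    parser = ArgumentParser()
    parser.add_argument("--video-src", type=Path, nargs='+',
                        default=lambda: list(Path('../DATASETS/VIRAT_subset_0102x/').glob('*.mp4')))
    parser.add_argument("--video-offset", default=None, type=int,
                        help="Start playback from given frame.")

    args = parser.parse_args(["--video-offset", "120"])
    assert args.video_offset == 120  # stays None when the flag is omitted

Note that argparse stores the --video-src default verbatim as a callable, so the consuming code presumably invokes it when no paths are passed.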
@@ -150,6 +150,12 @@ class FrameEmitter:
         target_frame_duration = 1./fps
         logger.info(f"Emit frames at {fps} fps")
+
+        if self.config.video_offset:
+            logger.info(f"Start at frame {self.config.video_offset}")
+            video.set(cv2.CAP_PROP_POS_FRAMES, self.config.video_offset)
+            i = self.config.video_offset
+
 
         if '-' in video_path.stem:
             path_stem = video_path.stem[:video_path.stem.rfind('-')]
         else:
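The new offset logic leans on OpenCV's frame-position property; a standalone sketch (the file name is a placeholder):

    import cv2

    video = cv2.VideoCapture("some_video.mp4")
    video.set(cv2.CAP_PROP_POS_FRAMES, 120)  # position the decoder at frame 120
    ok, frame = video.read()                 # the next read() returns frame 120

The hunk also resets the local counter i to the offset, so frame indices emitted downstream stay in step with the seek.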
@@ -122,7 +122,7 @@ class PredictionServer:
             logger.warning("Running on CPU. Specifying --eval_device cuda:0 should dramatically speed up prediction")

         if self.config.smooth_predictions:
-            self.smoother = Smoother()
+            self.smoother = Smoother(window_len=4)

         context = zmq.Context()
         self.trajectory_socket: zmq.Socket = context.socket(zmq.SUB)
@@ -132,9 +132,10 @@ def decorate_frame(frame: Frame, prediction_frame: Frame, first_time) -> np.arra
     # new_H = S * self.H * np.linalg.inv(S)
     # warpedFrame = cv2.warpPerspective(img, new_H, (1000,1000))
     # cv2.imwrite(str(self.config.output_dir / "orig.png"), warpedFrame)
+    cv2.rectangle(img, (0,0), (img.shape[1],25), (0,0,0), -1)

     if not prediction_frame:
-        cv2.putText(img, f"Waiting for prediction...", (20,50), cv2.FONT_HERSHEY_PLAIN, 1, (255,255,0), 1)
+        cv2.putText(img, f"Waiting for prediction...", (20,20), cv2.FONT_HERSHEY_PLAIN, 1, (255,255,0), 1)
         # continue
     else:
         inv_H = np.linalg.pinv(prediction_frame.H)
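Together with the overlay text moving from y=50 to y=20 throughout this file, the filled rectangle forms a 25-pixel status banner that the text now sits inside. A self-contained sketch (frame size invented):

    import cv2
    import numpy as np

    img = np.zeros((480, 640, 3), dtype=np.uint8)
    cv2.rectangle(img, (0, 0), (img.shape[1], 25), (0, 0, 0), -1)  # thickness -1 = filled
    cv2.putText(img, "Waiting for prediction...", (20, 20),        # text baseline at y=20, inside the banner
                cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 0), 1)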
@@ -151,7 +152,8 @@ def decorate_frame(frame: Frame, prediction_frame: Frame, first_time) -> np.arra
         for ci in range(1, len(coords)):
             start = [int(p) for p in coords[ci-1]]
             end = [int(p) for p in coords[ci]]
-            color = (255,255,255) if confirmations[ci] else (100,100,100)
+            # color = (255,255,255) if confirmations[ci] else (100,100,100)
+            color = [100+155*ci/len(coords)]*3
             cv2.line(img, start, end, color, 2, lineType=cv2.LINE_AA)

         if not track.predictions or not len(track.predictions):
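The confirmation-based coloring gives way to a grey ramp: brightness grows with the segment index, so later segments of a track draw lighter. A quick illustration with made-up coordinates:

    coords = [(0, 0), (4, 2), (8, 5), (12, 9)]
    for ci in range(1, len(coords)):
        color = [100 + 155 * ci / len(coords)] * 3
        print(ci, color)  # ci=1 -> [138.75]*3, ci=3 -> [216.25]*3

Since ci tops out at len(coords)-1, the ramp approaches but never quite reaches 255.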
@@ -180,17 +182,17 @@ def decorate_frame(frame: Frame, prediction_frame: Frame, first_time) -> np.arra
         cv2.putText(img, f"{track_id} ({(track.history[-1].conf or 0):.2f})", (center[0]+8, center[1]), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.7, thickness=2, color=(0,255,0), lineType=cv2.LINE_AA)


-    cv2.putText(img, f"{frame.index:06d}", (20,50), cv2.FONT_HERSHEY_PLAIN, 1, (255,255,0), 1)
-    cv2.putText(img, f"{frame.time - first_time:.3f}s", (120,50), cv2.FONT_HERSHEY_PLAIN, 1, (255,255,0), 1)
+    cv2.putText(img, f"{frame.index:06d}", (20,20), cv2.FONT_HERSHEY_PLAIN, 1, (255,255,0), 1)
+    cv2.putText(img, f"{frame.time - first_time:.3f}s", (120,20), cv2.FONT_HERSHEY_PLAIN, 1, (255,255,0), 1)

     if prediction_frame:
         # render Δt and Δ frames
-        cv2.putText(img, f"{prediction_frame.index - frame.index}", (90,50), cv2.FONT_HERSHEY_PLAIN, 1, (0,0,255), 1)
-        cv2.putText(img, f"{prediction_frame.time - time.time():.2f}s", (200,50), cv2.FONT_HERSHEY_PLAIN, 1, (0,0,255), 1)
-        cv2.putText(img, f"{len(prediction_frame.tracks)} tracks", (500,50), cv2.FONT_HERSHEY_PLAIN, 1, (0,0,255), 1)
-        cv2.putText(img, f"h: {np.average([len(t.history or []) for t in prediction_frame.tracks.values()])}", (580, 50), cv2.FONT_HERSHEY_PLAIN, 1, (0,0,255), 1)
-        cv2.putText(img, f"ph: {np.average([len(t.predictor_history or []) for t in prediction_frame.tracks.values()])}", (660, 50), cv2.FONT_HERSHEY_PLAIN, 1, (0,0,255), 1)
-        cv2.putText(img, f"p: {np.average([len(t.predictions or []) for t in prediction_frame.tracks.values()])}", (740, 50), cv2.FONT_HERSHEY_PLAIN, 1, (0,0,255), 1)
+        cv2.putText(img, f"{prediction_frame.index - frame.index}", (90,20), cv2.FONT_HERSHEY_PLAIN, 1, (0,0,255), 1)
+        cv2.putText(img, f"{prediction_frame.time - time.time():.2f}s", (200,20), cv2.FONT_HERSHEY_PLAIN, 1, (0,0,255), 1)
+        cv2.putText(img, f"{len(prediction_frame.tracks)} tracks", (500,20), cv2.FONT_HERSHEY_PLAIN, 1, (0,0,255), 1)
+        cv2.putText(img, f"h: {np.average([len(t.history or []) for t in prediction_frame.tracks.values()])}", (580,20), cv2.FONT_HERSHEY_PLAIN, 1, (0,0,255), 1)
+        cv2.putText(img, f"ph: {np.average([len(t.predictor_history or []) for t in prediction_frame.tracks.values()])}", (660,20), cv2.FONT_HERSHEY_PLAIN, 1, (0,0,255), 1)
+        cv2.putText(img, f"p: {np.average([len(t.predictions or []) for t in prediction_frame.tracks.values()])}", (740,20), cv2.FONT_HERSHEY_PLAIN, 1, (0,0,255), 1)

     return img

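The red overlay values compare the most recent prediction frame against the frame being rendered; a toy computation with invented numbers:

    import time

    frame_index, prediction_index = 1000, 1004
    print(prediction_index - frame_index)            # Δ frames between the two
    prediction_time = time.time() - 0.25
    print(f"{prediction_time - time.time():.2f}s")   # Δt: negative when the prediction is older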
@@ -26,6 +26,7 @@ from trap.frame_emitter import DetectionState, Frame, Detection, Track


 from tsmoothie.smoother import KalmanSmoother, ConvolutionSmoother
+import tsmoothie.smoother

 # Detection = [int, int, int, int, float, int]
 # Detections = [Detection]
@@ -322,8 +323,9 @@ def run_tracker(config: Namespace, is_running: Event):

 class Smoother:

-    def __init__(self):
-        self.smoother = ConvolutionSmoother(window_len=20, window_type='ones', copy=None)
+    def __init__(self, window_len=2):
+        self.smoother = ConvolutionSmoother(window_len=window_len, window_type='ones', copy=None)
+

     def smooth_frame_tracks(self, frame: Frame) -> Frame:
         new_tracks = []
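The substance of the commit: window_len was hard-coded to 20 samples; it is now a constructor parameter defaulting to 2, with the prediction server opting into 4, presumably to cut smoothing lag. A standalone sketch of ConvolutionSmoother with a small window (tsmoothie's public API; the input series is made up):

    import numpy as np
    from tsmoothie.smoother import ConvolutionSmoother

    data = np.array([0., 1., 0., 1., 10., 1., 0., 1., 0.])  # series with one spike
    smoother = ConvolutionSmoother(window_len=4, window_type='ones', copy=True)
    smoother.smooth(data)
    print(smoother.smooth_data[0])  # the spike is averaged down over the 4-sample window

With window_type='ones' this is a plain moving average, so a smaller window trades smoothness for responsiveness.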