Prettify the output

parent af2c943673
commit 8d9c7d3486

1 changed file with 49 additions and 17 deletions
@@ -121,7 +121,25 @@ class Renderer:
             # oddly wrapped, because both close and release() take time.
             self.streaming_process.wait()
 
+
+# colorset = itertools.product([0,255], repeat=3)  # but remove white
+colorset = [(0, 0, 0),
+            (0, 0, 255),
+            (0, 255, 0),
+            (0, 255, 255),
+            (255, 0, 0),
+            (255, 0, 255),
+            (255, 255, 0)
+            ]
+
 def decorate_frame(frame: Frame, prediction_frame: Frame, first_time) -> np.array:
+    frame.img
+
+    overlay = np.zeros(frame.img.shape, np.uint8)
+    # fill the overlay with a purple tint (set each pixel to (128, 0, 128))
+    overlay[:] = (128, 0, 128)
+
+    frame.img = cv2.addWeighted(frame.img, .5, overlay, .5, 0)
     img = frame.img
 
     # all not working:
@@ -154,45 +172,59 @@ def decorate_frame(frame: Frame, prediction_frame: Frame, first_time) -> np.array:
             end = [int(p) for p in coords[ci]]
             # color = (255,255,255) if confirmations[ci] else (100,100,100)
             color = [100+155*ci/len(coords)]*3
-            cv2.line(img, start, end, color, 2, lineType=cv2.LINE_AA)
+            cv2.line(img, start, end, color, 1, lineType=cv2.LINE_AA)
+            cv2.circle(img, end, 2, color, lineType=cv2.LINE_AA)
 
         if not track.predictions or not len(track.predictions):
             continue
 
+        color = colorset[track_id % len(colorset)]
+
         for pred_i, pred in enumerate(track.predictions):
-            pred_coords = cv2.perspectiveTransform(np.array([pred]), inv_H)[0]
-            color = (0,0,255) if pred_i else (100,100,100)
-            for ci in range(1, len(pred_coords)):
-                start = [int(p) for p in pred_coords[ci-1]]
+            pred_coords = cv2.perspectiveTransform(np.array([pred]), inv_H)[0].tolist()
+            # color = (128,0,128) if pred_i else (128,128,0)
+            for ci in range(0, len(pred_coords)):
+                if ci == 0:
+                    start = [int(p) for p in coords[-1]]
+                    # start = [0,0]?
+                    # print(start)
+                else:
+                    start = [int(p) for p in pred_coords[ci-1]]
                 end = [int(p) for p in pred_coords[ci]]
-                cv2.line(img, start, end, color, 1, lineType=cv2.LINE_AA)
+                cv2.line(img, start, end, color, 2, lineType=cv2.LINE_AA)
+                cv2.circle(img, end, 2, color, 1, lineType=cv2.LINE_AA)
+
 
     for track_id, track in prediction_frame.tracks.items():
         # draw tracker marker and track id last so it lies over the trajectories
        # this goes in a second loop so it overlays _all_ trajectories
        # coords = cv2.perspectiveTransform(np.array([[track.history[-1].get_foot_coords()]]), self.inv_H)[0]
        coords = track.history[-1].get_foot_coords()
+       color = colorset[track_id % len(colorset)]
 
        center = [int(p) for p in coords]
-       cv2.circle(img, center, 5, (0,255,0))
+       cv2.circle(img, center, 6, (255,255,255), thickness=3)
        (l, t, r, b) = track.history[-1].to_ltrb()
        p1 = (l, t)
        p2 = (r, b)
-       cv2.rectangle(img, p1, p2, (255,0,0), 1)
-       cv2.putText(img, f"{track_id} ({(track.history[-1].conf or 0):.2f})", (center[0]+8, center[1]), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.7, thickness=2, color=(0,255,0), lineType=cv2.LINE_AA)
+       # cv2.rectangle(img, p1, p2, (255,0,0), 1)
+       cv2.putText(img, f"{track_id} ({(track.history[-1].conf or 0):.2f})", (center[0]+8, center[1]), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.7, thickness=1, color=color, lineType=cv2.LINE_AA)
 
 
-    cv2.putText(img, f"{frame.index:06d}", (20,20), cv2.FONT_HERSHEY_PLAIN, 1, (255,255,0), 1)
-    cv2.putText(img, f"{frame.time - first_time:.3f}s", (120,20), cv2.FONT_HERSHEY_PLAIN, 1, (255,255,0), 1)
+    base_color = (255,)*3
+    info_color = (255,255,0)
+
+    cv2.putText(img, f"{frame.index:06d}", (20,17), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
+    cv2.putText(img, f"{frame.time - first_time:.3f}s", (120,17), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
 
     if prediction_frame:
        # render Δt and Δ frames
-       cv2.putText(img, f"{prediction_frame.index - frame.index}", (90,20), cv2.FONT_HERSHEY_PLAIN, 1, (0,0,255), 1)
-       cv2.putText(img, f"{prediction_frame.time - time.time():.2f}s", (200,20), cv2.FONT_HERSHEY_PLAIN, 1, (0,0,255), 1)
-       cv2.putText(img, f"{len(prediction_frame.tracks)} tracks", (500,20), cv2.FONT_HERSHEY_PLAIN, 1, (0,0,255), 1)
-       cv2.putText(img, f"h: {np.average([len(t.history or []) for t in prediction_frame.tracks.values()])}", (580,20), cv2.FONT_HERSHEY_PLAIN, 1, (0,0,255), 1)
-       cv2.putText(img, f"ph: {np.average([len(t.predictor_history or []) for t in prediction_frame.tracks.values()])}", (660,20), cv2.FONT_HERSHEY_PLAIN, 1, (0,0,255), 1)
-       cv2.putText(img, f"p: {np.average([len(t.predictions or []) for t in prediction_frame.tracks.values()])}", (740,20), cv2.FONT_HERSHEY_PLAIN, 1, (0,0,255), 1)
+       cv2.putText(img, f"{prediction_frame.index - frame.index}", (90,17), cv2.FONT_HERSHEY_PLAIN, 1, info_color, 1)
+       cv2.putText(img, f"{prediction_frame.time - time.time():.2f}s", (200,17), cv2.FONT_HERSHEY_PLAIN, 1, info_color, 1)
+       cv2.putText(img, f"{len(prediction_frame.tracks)} tracks", (500,17), cv2.FONT_HERSHEY_PLAIN, 1, base_color, 1)
+       cv2.putText(img, f"h: {np.average([len(t.history or []) for t in prediction_frame.tracks.values()])}", (580,17), cv2.FONT_HERSHEY_PLAIN, 1, info_color, 1)
+       cv2.putText(img, f"ph: {np.average([len(t.predictor_history or []) for t in prediction_frame.tracks.values()])}", (660,17), cv2.FONT_HERSHEY_PLAIN, 1, info_color, 1)
+       cv2.putText(img, f"p: {np.average([len(t.predictions or []) for t in prediction_frame.tracks.values()])}", (740,17), cv2.FONT_HERSHEY_PLAIN, 1, info_color, 1)
 
     return img
 
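For context, a minimal sketch of the dimming and palette idea this commit introduces, assuming only OpenCV and numpy with a stand-in BGR array in place of frame.img (the repo's Frame and Track classes are not used here):

import cv2
import numpy as np

# palette from the commit; white is deliberately left out
colorset = [(0, 0, 0), (0, 0, 255), (0, 255, 0), (0, 255, 255),
            (255, 0, 0), (255, 0, 255), (255, 255, 0)]

# stand-in camera frame (mid-grey), purely for illustration
img = np.full((480, 640, 3), 200, dtype=np.uint8)

# blend a purple layer at 50% so drawn trajectories stand out against the video
overlay = np.zeros(img.shape, np.uint8)
overlay[:] = (128, 0, 128)
img = cv2.addWeighted(img, .5, overlay, .5, 0)

# each track id maps to a stable colour by cycling the palette
for track_id in range(10):
    color = colorset[track_id % len(colorset)]
    center = (40 + 55 * track_id, 240)
    cv2.circle(img, center, 6, (255, 255, 255), thickness=3)
    cv2.putText(img, str(track_id), (center[0] + 8, center[1]),
                cv2.FONT_HERSHEY_SIMPLEX, .7, color, 1, lineType=cv2.LINE_AA)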
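Similarly, a small sketch of the gap-free prediction polyline produced by the range(0, len(pred_coords)) change: the first segment now starts at the last observed coordinate rather than the first prediction, so the predicted path visibly continues from the track. The point lists below are made up for illustration; in the commit they come from cv2.perspectiveTransform of the track's predictions.

import cv2
import numpy as np

img = np.zeros((480, 640, 3), np.uint8)
coords = [(100, 400), (140, 380), (180, 360)]       # observed track positions
pred_coords = [(220, 345), (260, 335), (300, 330)]  # predicted positions
color = (0, 255, 255)

for ci in range(0, len(pred_coords)):
    # ci == 0 connects the last observed point to the first prediction,
    # so the prediction does not float detached from the track
    start = coords[-1] if ci == 0 else pred_coords[ci - 1]
    end = pred_coords[ci]
    cv2.line(img, start, end, color, 2, lineType=cv2.LINE_AA)
    cv2.circle(img, end, 2, color, 1, lineType=cv2.LINE_AA)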