Compare commits
No commits in common. "53c18d9a7b343dafb52e488146ca133f9ffaf40c" and "89076650c473807f540b9f622ca730ef4d5acc22" have entirely different histories.
53c18d9a7b...89076650c4
5 changed files with 19 additions and 58 deletions
@@ -183,22 +183,22 @@ inference_parser.add_argument("--z-mode",
 connection_parser.add_argument('--zmq-trajectory-addr',
                     help='Manually specity communication addr for the trajectory messages',
                     type=str,
-                    default="ipc:///tmp/feeds_traj")
+                    default="ipc:///tmp/feeds/traj")
 
 connection_parser.add_argument('--zmq-camera-stream-addr',
                     help='Manually specity communication addr for the camera stream messages',
                     type=str,
-                    default="ipc:///tmp/feeds_img")
+                    default="ipc:///tmp/feeds/img")
 
 connection_parser.add_argument('--zmq-prediction-addr',
                     help='Manually specity communication addr for the prediction messages',
                     type=str,
-                    default="ipc:///tmp/feeds_preds")
+                    default="ipc:///tmp/feeds/preds")
 
 connection_parser.add_argument('--zmq-frame-addr',
                     help='Manually specity communication addr for the frame messages',
                     type=str,
-                    default="ipc:///tmp/feeds_frame")
+                    default="ipc:///tmp/feeds/frame")
 
 
 connection_parser.add_argument('--ws-port',
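A note on the new defaults above: ZeroMQ's ipc:// transport maps endpoints to filesystem paths, and it does not create parent directories, so moving the sockets from /tmp/feeds_* into /tmp/feeds/* only works if /tmp/feeds exists before bind(). A minimal sketch of the pattern; the PUB socket type is an assumption, since the actual socket setup is outside this diff:

    import os
    import zmq

    addr = "ipc:///tmp/feeds/traj"            # new default from this hunk
    os.makedirs("/tmp/feeds", exist_ok=True)  # ipc:// needs the directory to exist

    ctx = zmq.Context.instance()
    sock = ctx.socket(zmq.PUB)                # socket type is an assumption
    sock.bind(addr)                           # raises ZMQError if /tmp/feeds is missing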
@@ -252,9 +252,6 @@ tracker_parser.add_argument("--smooth-tracks",
 render_parser.add_argument("--render-file",
                     help="Render a video file previewing the prediction, and its delay compared to the current frame",
                     action='store_true')
-render_parser.add_argument("--render-window",
-                    help="Render a previewing to a window",
-                    action='store_true')
 
 render_parser.add_argument("--render-url",
                     help="""Stream renderer on given URL. Two easy approaches:
@@ -145,15 +145,6 @@ class FrameEmitter:
         i = 0
         for video_path in self.video_srcs:
             logger.info(f"Play from '{str(video_path)}'")
-            if str(video_path).isdigit():
-                # numeric input is a CV camera
-                video = cv2.VideoCapture(int(str(video_path)))
-                # TODO: make config variables
-                video.set(cv2.CAP_PROP_FRAME_WIDTH, int(1280))
-                video.set(cv2.CAP_PROP_FRAME_HEIGHT, int(720))
-                print("exposure!", video.get(cv2.CAP_PROP_AUTO_EXPOSURE))
-                video.set(cv2.CAP_PROP_FPS, 5)
-            else:
-                video = cv2.VideoCapture(str(video_path))
+            video = cv2.VideoCapture(str(video_path))
             fps = video.get(cv2.CAP_PROP_FPS)
             target_frame_duration = 1./fps
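The deleted branch above was the only path that treated a numeric source as a live camera and tuned it through OpenCV's property API; after this change every source goes through cv2.VideoCapture(str(video_path)). For reference, a sketch of the removed capture-tuning pattern (device index 0 and the 1280x720/5fps values mirror the deleted lines; whether a backend honours the set() calls is driver-dependent):

    import cv2

    video = cv2.VideoCapture(0)                # numeric input is a CV camera
    video.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)  # request capture size...
    video.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
    video.set(cv2.CAP_PROP_FPS, 5)             # ...and frame rate
    print("exposure:", video.get(cv2.CAP_PROP_AUTO_EXPOSURE))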
@@ -73,7 +73,7 @@ def start():
         ExceptionHandlingProcess(target=run_tracker, kwargs={'config': args, 'is_running': isRunning}, name='tracker'),
     ]
 
-    if args.render_file or args.render_url or args.render_window:
+    if args.render_file or args.render_url:
         procs.append(
             ExceptionHandlingProcess(target=run_renderer, kwargs={'config': args, 'is_running': isRunning}, name='renderer')
         )
@@ -326,18 +326,13 @@ class PredictionServer:
             start = time.time()
             with warnings.catch_warnings():
                 warnings.simplefilter('ignore') # prevent deluge of UserWarning from torch's rrn.py
-
-                # in the OnlineMultimodalGenerativeCVAE (see trajectron.model.online_mgcvae.py) each node's distribution
-                # is put stored in self.latent.p_dist by OnlineMultimodalGenerativeCVAE.p_z_x(). Type: torch.distributions.OneHotCategorical
-                # Later sampling in discrete_latent.py: DiscreteLatent.sample_p()
                 dists, preds = trajectron.incremental_forward(input_dict,
                                                               maps,
                                                               prediction_horizon=self.config.prediction_horizon, # TODO: make variable
                                                               num_samples=self.config.num_samples, # TODO: make variable
-                                                              full_dist=self.config.full_dist, # "The model’s full sampled output, where z and y are sampled sequentially"
-                                                              gmm_mode=self.config.gmm_mode, # "If True: The mode of the Gaussian Mixture Model (GMM) is sampled (see trajectron.model.mgcvae.py)"
-                                                              z_mode=self.config.z_mode # "Predictions from the model’s most-likely high-level latent behavior mode" (see trajecton.models.components.discrete_latent:sample_p(most_likely_z=z_mode))
-                                                              )
+                                                              full_dist=self.config.full_dist,
+                                                              gmm_mode=self.config.gmm_mode,
+                                                              z_mode=self.config.z_mode)
             end = time.time()
             logger.debug("took %.2f s (= %.2f Hz) w/ %d nodes and %d edges" % (end - start,
                                                                                1. / (end - start), len(trajectron.nodes),
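The catch_warnings block above scopes the suppression to the incremental_forward call: filters changed inside the with-block are restored on exit. A minimal sketch of that mechanism; run_model is a hypothetical stand-in, and narrowing the filter to UserWarning (instead of the blanket 'ignore' used above) is shown as an alternative:

    import warnings

    def run_model():
        # hypothetical stand-in for trajectron.incremental_forward(...)
        warnings.warn("deluge of UserWarning from torch")

    with warnings.catch_warnings():
        warnings.simplefilter('ignore', category=UserWarning)  # silence only UserWarning
        run_model()                                            # nothing is printed here
    # outside the block the previous warning filters apply again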
@@ -81,10 +81,6 @@ class Renderer:
         self.out_writer = self.start_writer() if self.config.render_file else None
         self.streaming_process = self.start_streaming() if self.config.render_url else None
-
-        if self.config.render_window:
-            cv2.namedWindow("frame", cv2.WND_PROP_FULLSCREEN)
-            cv2.setWindowProperty("frame",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
 
     def start_writer(self):
         if not self.config.output_dir.exists():
             raise FileNotFoundError("Path does not exist")
@@ -125,33 +121,18 @@ class Renderer:
 
 
     def run(self):
-        frame = None
         prediction_frame = None
 
         i=0
         first_time = None
         while self.is_running.is_set():
             i+=1
 
-            # zmq_ev = self.frame_sock.poll(timeout=2000)
-            # if not zmq_ev:
-            #     # when no data comes in, loop so that is_running is checked
-            #     continue
-
-            try:
-                frame: Frame = self.frame_sock.recv_pyobj(zmq.NOBLOCK)
-            except zmq.ZMQError as e:
-                idx = frame.index if frame else "NONE"
-                logger.debug(f"reuse video frame {idx}")
-            else:
-                logger.debug(f'new video frame {frame.index}')
-
-            if frame is None:
-                # might need to wait a few iterations before first frame comes available
-                time.sleep(.1)
+            zmq_ev = self.frame_sock.poll(timeout=2000)
+            if not zmq_ev:
+                # when no data comes in, loop so that is_running is checked
                 continue
 
+            frame: Frame = self.frame_sock.recv_pyobj()
             try:
                 prediction_frame: Frame = self.prediction_sock.recv_pyobj(zmq.NOBLOCK)
             except zmq.ZMQError as e:
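The rewritten loop above swaps non-blocking recv_pyobj(zmq.NOBLOCK) plus frame reuse for poll(timeout) followed by a blocking receive: poll() keeps the loop responsive to is_running, and once it reports readiness the recv cannot stall indefinitely, so no zmq.ZMQError handling is needed on the frame socket. A sketch of the pattern, assuming a SUB socket subscribed to everything (the actual socket setup is outside this diff):

    import zmq

    ctx = zmq.Context.instance()
    sock = ctx.socket(zmq.SUB)              # socket type is an assumption
    sock.setsockopt(zmq.SUBSCRIBE, b"")
    sock.connect("ipc:///tmp/feeds/frame")  # default address from this compare

    running = True                          # stand-in for is_running.is_set()
    while running:
        if not sock.poll(timeout=2000):     # milliseconds; 0 means nothing arrived
            continue                        # loop again so the running flag is re-checked
        frame = sock.recv_pyobj()           # safe: poll() reported a pending message
        print("got frame", frame)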
@@ -160,7 +141,7 @@ class Renderer:
             if first_time is None:
                 first_time = frame.time
 
-            img = decorate_frame(frame, prediction_frame, first_time, self.config)
+            decorate_frame(frame, prediction_frame, first_time, self.config)
 
             img_path = (self.config.output_dir / f"{i:05d}.png").resolve()
 
@@ -168,12 +149,9 @@
 
             logger.debug(f"write frame {frame.time - first_time:.3f}s")
             if self.out_writer:
-                self.out_writer.write(img)
+                self.out_writer.write(frame.img)
             if self.streaming_process:
-                self.streaming_process.stdin.write(img.tobytes())
-            if self.config.render_window:
-                cv2.imshow('frame',img)
-                cv2.waitKey(1)
+                self.streaming_process.stdin.write(frame.img.tobytes())
         logger.info('Stopping')
 
         if i>2:
@@ -203,8 +181,8 @@ def decorate_frame(frame: Frame, prediction_frame: Frame, first_time: float, con
     # Fill image with red color(set each pixel to red)
     overlay[:] = (130, 0, 75)
 
-    img = cv2.addWeighted(frame.img, .4, overlay, .6, 0)
-    # img = frame.img.copy()
+    frame.img = cv2.addWeighted(frame.img, .4, overlay, .6, 0)
+    img = frame.img
 
     # all not working:
     # if i == 1:
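The last two hunks are one change: decorate_frame used to build a local img that run() captured from the return value; now the result of cv2.addWeighted is assigned back onto frame.img, so run() reads the decorated image straight off the Frame object (hence out_writer.write(frame.img) above). A minimal sketch of that in-place style, with a hypothetical Frame class standing in for the repo's:

    import cv2
    import numpy as np

    class Frame:                                 # hypothetical minimal stand-in
        def __init__(self, img):
            self.img = img

    frame = Frame(np.zeros((720, 1280, 3), dtype=np.uint8))
    overlay = np.empty_like(frame.img)
    overlay[:] = (130, 0, 75)                    # BGR fill values, as in the hunk

    # addWeighted returns a new array; rebinding frame.img publishes the blend
    # to every later reader of the Frame object
    frame.img = cv2.addWeighted(frame.img, .4, overlay, .6, 0)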