Compare commits

..

4 commits

Author SHA1 Message Date
Ruben van de Ven
53c18d9a7b Some comments on parameters 2024-06-20 19:21:44 +02:00
Ruben van de Ven
531d61b69a detach render FPS from frame emit 2024-06-20 12:32:05 +02:00
Ruben van de Ven
7f1c3d86f7 remove superfluous logging 2024-06-20 12:11:23 +02:00
Ruben van de Ven
d9547bb372 Render window option to preview without delay 2024-06-19 14:34:07 +02:00
5 changed files with 58 additions and 19 deletions

View file

@ -183,22 +183,22 @@ inference_parser.add_argument("--z-mode",
connection_parser.add_argument('--zmq-trajectory-addr',
help='Manually specify communication addr for the trajectory messages',
type=str,
default="ipc:///tmp/feeds/traj")
default="ipc:///tmp/feeds_traj")
connection_parser.add_argument('--zmq-camera-stream-addr',
help='Manually specify communication addr for the camera stream messages',
type=str,
default="ipc:///tmp/feeds/img")
default="ipc:///tmp/feeds_img")
connection_parser.add_argument('--zmq-prediction-addr',
help='Manually specify communication addr for the prediction messages',
type=str,
default="ipc:///tmp/feeds/preds")
default="ipc:///tmp/feeds_preds")
connection_parser.add_argument('--zmq-frame-addr',
help='Manually specify communication addr for the frame messages',
type=str,
default="ipc:///tmp/feeds/frame")
default="ipc:///tmp/feeds_frame")
connection_parser.add_argument('--ws-port',
@ -252,6 +252,9 @@ tracker_parser.add_argument("--smooth-tracks",
render_parser.add_argument("--render-file",
help="Render a video file previewing the prediction, and its delay compared to the current frame",
action='store_true')
render_parser.add_argument("--render-window",
help="Render a preview to a window",
action='store_true')
render_parser.add_argument("--render-url",
help="""Stream renderer on given URL. Two easy approaches:

View file

@ -145,7 +145,16 @@ class FrameEmitter:
i = 0
for video_path in self.video_srcs:
logger.info(f"Play from '{str(video_path)}'")
video = cv2.VideoCapture(str(video_path))
if str(video_path).isdigit():
# numeric input is a CV camera
video = cv2.VideoCapture(int(str(video_path)))
# TODO: make config variables
video.set(cv2.CAP_PROP_FRAME_WIDTH, int(1280))
video.set(cv2.CAP_PROP_FRAME_HEIGHT, int(720))
print("exposure!", video.get(cv2.CAP_PROP_AUTO_EXPOSURE))
video.set(cv2.CAP_PROP_FPS, 5)
else:
video = cv2.VideoCapture(str(video_path))
fps = video.get(cv2.CAP_PROP_FPS)
target_frame_duration = 1./fps
logger.info(f"Emit frames at {fps} fps")

View file

@ -73,7 +73,7 @@ def start():
ExceptionHandlingProcess(target=run_tracker, kwargs={'config': args, 'is_running': isRunning}, name='tracker'),
]
if args.render_file or args.render_url:
if args.render_file or args.render_url or args.render_window:
procs.append(
ExceptionHandlingProcess(target=run_renderer, kwargs={'config': args, 'is_running': isRunning}, name='renderer')
)

View file

@ -326,13 +326,18 @@ class PredictionServer:
start = time.time()
with warnings.catch_warnings():
warnings.simplefilter('ignore') # prevent deluge of UserWarning from torch's rrn.py
# in the OnlineMultimodalGenerativeCVAE (see trajectron.model.online_mgcvae.py) each node's distribution
# is stored in self.latent.p_dist by OnlineMultimodalGenerativeCVAE.p_z_x(). Type: torch.distributions.OneHotCategorical
# Later sampling in discrete_latent.py: DiscreteLatent.sample_p()
dists, preds = trajectron.incremental_forward(input_dict,
maps,
prediction_horizon=self.config.prediction_horizon, # TODO: make variable
num_samples=self.config.num_samples, # TODO: make variable
full_dist=self.config.full_dist,
gmm_mode=self.config.gmm_mode,
z_mode=self.config.z_mode)
full_dist=self.config.full_dist, # "The model's full sampled output, where z and y are sampled sequentially"
gmm_mode=self.config.gmm_mode, # "If True: The mode of the Gaussian Mixture Model (GMM) is sampled (see trajectron.model.mgcvae.py)"
z_mode=self.config.z_mode # "Predictions from the model's most-likely high-level latent behavior mode" (see trajectron.models.components.discrete_latent:sample_p(most_likely_z=z_mode))
)
end = time.time()
logger.debug("took %.2f s (= %.2f Hz) w/ %d nodes and %d edges" % (end - start,
1. / (end - start), len(trajectron.nodes),

View file

@ -81,6 +81,10 @@ class Renderer:
self.out_writer = self.start_writer() if self.config.render_file else None
self.streaming_process = self.start_streaming() if self.config.render_url else None
if self.config.render_window:
cv2.namedWindow("frame", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("frame",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
def start_writer(self):
if not self.config.output_dir.exists():
raise FileNotFoundError("Path does not exist")
@ -121,18 +125,33 @@ class Renderer:
def run(self):
frame = None
prediction_frame = None
i=0
first_time = None
while self.is_running.is_set():
i+=1
zmq_ev = self.frame_sock.poll(timeout=2000)
if not zmq_ev:
# when no data comes in, loop so that is_running is checked
continue
frame: Frame = self.frame_sock.recv_pyobj()
# zmq_ev = self.frame_sock.poll(timeout=2000)
# if not zmq_ev:
# # when no data comes in, loop so that is_running is checked
# continue
try:
frame: Frame = self.frame_sock.recv_pyobj(zmq.NOBLOCK)
except zmq.ZMQError as e:
idx = frame.index if frame else "NONE"
logger.debug(f"reuse video frame {idx}")
else:
logger.debug(f'new video frame {frame.index}')
if frame is None:
# might need to wait a few iterations before first frame comes available
time.sleep(.1)
continue
try:
prediction_frame: Frame = self.prediction_sock.recv_pyobj(zmq.NOBLOCK)
except zmq.ZMQError as e:
@ -141,7 +160,7 @@ class Renderer:
if first_time is None:
first_time = frame.time
decorate_frame(frame, prediction_frame, first_time, self.config)
img = decorate_frame(frame, prediction_frame, first_time, self.config)
img_path = (self.config.output_dir / f"{i:05d}.png").resolve()
@ -149,9 +168,12 @@ class Renderer:
logger.debug(f"write frame {frame.time - first_time:.3f}s")
if self.out_writer:
self.out_writer.write(frame.img)
self.out_writer.write(img)
if self.streaming_process:
self.streaming_process.stdin.write(frame.img.tobytes())
self.streaming_process.stdin.write(img.tobytes())
if self.config.render_window:
cv2.imshow('frame',img)
cv2.waitKey(1)
logger.info('Stopping')
if i>2:
@ -181,8 +203,8 @@ def decorate_frame(frame: Frame, prediction_frame: Frame, first_time: float, con
# Fill image with red color(set each pixel to red)
overlay[:] = (130, 0, 75)
frame.img = cv2.addWeighted(frame.img, .4, overlay, .6, 0)
img = frame.img
img = cv2.addWeighted(frame.img, .4, overlay, .6, 0)
# img = frame.img.copy()
# all not working:
# if i == 1: