Basic lidar tracker (still glitchy)

Ruben van de Ven 2025-10-28 16:03:57 +01:00
parent c4cdda64e1
commit e6d1457320
5 changed files with 331 additions and 99 deletions

View file

@@ -23,8 +23,8 @@ directory=%(here)s
autostart=false
[program:video]
# command=uv run trap_video_source --homography ../DATASETS/hof3/homography.json --video-src ../DATASETS/hof3/hof3-cam-demo-twoperson.mp4 --calibration ../DATASETS/hof3/calibration.json --video-loop
command=uv run trap_video_source --homography ../DATASETS/hof3-cam-baumer-cropped/homography.json --video-src gige://../DATASETS/hof3-cam-baumer-cropped/gige_config.json --calibration ../DATASETS/hof3-cam-baumer-cropped/calibration.json
command=uv run trap_video_source --homography ../DATASETS/hof3/homography.json --video-src ../DATASETS/hof3/hof3-cam-demo-twoperson.mp4 --calibration ../DATASETS/hof3/calibration.json --video-loop
# command=uv run trap_video_source --homography ../DATASETS/hof3-cam-baumer-cropped/homography.json --video-src gige://../DATASETS/hof3-cam-baumer-cropped/gige_config.json --calibration ../DATASETS/hof3-cam-baumer-cropped/calibration.json
directory=%(here)s
directory=%(here)s

View file

@@ -3,6 +3,7 @@ from __future__ import annotations
from abc import ABC, abstractmethod
import argparse
from collections import defaultdict
from copy import deepcopy
from enum import IntFlag
from itertools import cycle
import json
@@ -252,6 +253,20 @@ class FisheyeCamera(DistortedCamera):
class UndistortedCamera(DistortedCamera):
def __init__(self, fps = 10):
self.fps = fps
self.H = np.eye(3,3)
def undistort_img(self, img: MatLike):
return deepcopy(img)
def undistort_points(self, distorted_points: PointList):
return deepcopy(distorted_points)
class Camera(DistortedCamera):
def __init__(self, mtx: cv2.Mat, dist: cv2.Mat, w: float, h: float, H: cv2.Mat, fps: float):
self.mtx = mtx
@@ -372,7 +387,13 @@ class Track:
if not self.created_at:
self.created_at = time.time()
if not self.updated_at:
self.update_at = time.time()
self.updated_at = time.time()
def track_age(self) -> float:
return time.time() - self.created_at
def track_update_dt(self) -> float:
return time.time() - self.updated_at
def get_projected_history(self, H: Optional[cv2.Mat] = None, camera: Optional[DistortedCamera]= None) -> np.array:
foot_coordinates = [d.get_foot_coords() for d in self.history]

View file

@@ -117,7 +117,7 @@ class CvRenderer(Node):
# return process
def run(self):
frame = None
self.frame = None
prediction_frame = None
tracker_frame = None
@@ -129,6 +129,8 @@ class CvRenderer(Node):
cv2.moveWindow("frame", 0, -1)
if self.config.full_screen:
cv2.setWindowProperty("frame",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
cv2.setMouseCallback('frame',self.click_print_position)
# bgsub = cv2.createBackgroundSubtractorMOG2(120, 50, detectShadows=True)
while self.run_loop():
@@ -140,7 +142,7 @@
# continue
try:
frame: Frame = self.frame_sock.recv_pyobj(zmq.NOBLOCK)
self.frame: Frame = self.frame_sock.recv_pyobj(zmq.NOBLOCK)
except zmq.ZMQError as e:
# idx = frame.index if frame else "NONE"
# logger.debug(f"reuse video frame {idx}")
@@ -149,7 +151,7 @@
# logger.debug(f'new video frame {frame.index}')
if frame is None:
if self.frame is None:
# might need to wait a few iterations before first frame comes available
time.sleep(.1)
continue
@@ -180,7 +182,7 @@
pass
if first_time is None:
first_time = frame.time
first_time = self.frame.time
# img = frame.img
# save_file = Path("videos/snap.png")
@@ -188,9 +190,9 @@
# img = frame.camera.img_to_world(frame.img, 100)
# cv2.imwrite(save_file, img)
img = decorate_frame(frame, tracker_frame, prediction_frame, first_time, self.config, self.tracks, self.predictions, self.detections, self.config.render_clusters, self.debug_lines, self.scale)
img = decorate_frame(self.frame, tracker_frame, prediction_frame, first_time, self.config, self.tracks, self.predictions, self.detections, self.config.render_clusters, self.debug_lines, self.scale)
logger.debug(f"write frame {frame.time - first_time:.3f}s")
logger.debug(f"write frame {self.frame.time - first_time:.3f}s")
if self.out_writer:
self.out_writer.write(img)
if self.streaming_process:
@@ -203,10 +205,10 @@
# clear out old tracks & predictions:
for track_id, track in list(self.tracks.items()):
if get_animation_position(track, frame) == 1:
if get_animation_position(track, self.frame) == 1:
self.tracks.pop(track_id)
for prediction_id, track in list(self.predictions.items()):
if get_animation_position(track, frame) == 1:
if get_animation_position(track, self.frame) == 1:
self.predictions.pop(prediction_id)
logger.info('Stopping')
@@ -275,6 +277,18 @@
default="../DATASETS/hof3/map_hof.svg")
return render_parser
def click_print_position(self, event,x,y,flags,param):
# if event == cv2.EVENT_LBUTTONDBLCLK:
if event == cv2.EVENT_LBUTTONUP:
if not self.frame:
return
scale = 100
print("click position:", x/scale, y/scale)
# self.frame.camera.points_img_to_world([[x, y]], 1)
# cv2.circle(img,(x,y),100,(255,0,0),-1)
mouseX,mouseY = x,y
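# e.g. with scale = 100, a click at pixel (250, 120) prints "click position: 2.5 1.2"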
# colorset = itertools.product([0,255], repeat=3) # but remove white
# colorset = [(0, 0, 0),
# (0, 0, 255),
@@ -345,7 +359,7 @@ def decorate_frame(frame: Frame, tracker_frame: Frame, prediction_frame: Frame,
[detection.l, detection.t],
[detection.l + detection.w, detection.t + detection.h],
]
points = frame.camera.points_img_to_world(points, scale)
points = tracker_frame.camera.points_img_to_world(points, scale)
points = [to_point(p) for p in points] # to int
w = points[1][0]-points[2][0]
@@ -363,7 +377,7 @@ def decorate_frame(frame: Frame, tracker_frame: Frame, prediction_frame: Frame,
else:
for track_id, track in tracks.items():
inv_H = np.linalg.pinv(tracker_frame.H)
draw_track_projected(img, track, int(track_id), frame.camera, conversion)
draw_track_projected(img, track, int(track_id), tracker_frame.camera, conversion)
for line in debug_lines:
for rp1, rp2 in zip(line.points, line.points[1:]):

View file

@@ -1,17 +1,19 @@
from argparse import ArgumentParser
from collections import deque, namedtuple
from collections import defaultdict, deque, namedtuple
from copy import copy
from pathlib import Path
import select
import socket
from typing import List
from typing import DefaultDict, List
import numpy as np
import open3d as o3d
from sklearn.cluster import DBSCAN
from scipy.spatial.distance import cdist
import logging
import time
from trap.base import Camera, Detection, Frame, Track, UndistortedCamera
from trap.node import Node
from bytetracker import BYTETracker
import velodyne_decoder as vd
@@ -29,18 +31,57 @@ ResultTuple = namedtuple("StampCloudTuple", ("stamp", "points"))
# see https://github.com/valgur/velodyne_decoder/issues/4
def read_live_data(ip, port, config, as_pcl_structs=False):
decoder = vd.StreamDecoder(config)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# for this to work run sudo sysctl -w net.core.rmem_max=4194304 && sudo sysctl -w net.core.rmem_default=4194304
s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 2 * 1024 * 1024) # 4MB
s.bind((ip, port))
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# To increase the buffer at network device, you might want to run sudo sysctl -w net.core.rmem_max=4194304 && sudo sysctl -w net.core.rmem_default=4194304
sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 4 * 1024 * 1024) # 4MB
sock.bind((ip, port))
sock.setblocking(False)
inputs = [sock]
while True:
# aggre
data, address = s.recvfrom(vd.PACKET_SIZE * 2)
readable, _, _ = select.select([sock], [], [], 1.0)
latest_data = {}
if sock in readable:
# Drain all queued packets and keep only the most recent one per sender;
# this prevents buffer build-up when processing cannot keep up with the LiDAR.
while True:
try:
data, addr = sock.recvfrom(vd.PACKET_SIZE * 2)
recv_stamp = time.time()
result = decoder.decode(recv_stamp, data, as_pcl_structs)
# this is like read_pcap
if result is not None:
yield ResultTuple(*result)
latest_data[addr] = ResultTuple(*result)
except BlockingIOError:
break # No more packets in the queue
for addr, data in latest_data.items():
yield data
else:
logger.warning("No LiDAR data in the last second.")
def filter_raw_points_by_distance(points: np.ndarray, center: np.ndarray, radius):
"""
Filters a point array based on distance from a center point.
Args:
points: An (N, 3) NumPy array of points.
center: A NumPy array representing the center point (x, y, z).
radius: The maximum distance from the center point to keep points.
Returns:
An (M, 3) NumPy array containing only the points within the specified radius.
"""
distances = np.linalg.norm(points - center, axis=1) # Calculate distance from each point to the center
indices = np.where(distances <= radius)[0] # Find indices of points within the radius
filtered_points = points[indices]
return filtered_points
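A quick check of this helper with illustrative values; a point 20 m from the origin falls outside a 15 m radius:

pts = np.array([[0.0, 0.0, 0.0], [20.0, 0.0, 0.0]])
kept = filter_raw_points_by_distance(pts, np.array([0.0, 0.0, 0.0]), radius=15)
assert kept.shape == (1, 3)  # only the origin survives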
class LidarTracker(Node):
def setup(self):
@@ -51,14 +92,23 @@ class LidarTracker(Node):
calibration = vd.Calibration.read("VLP-16.yaml")
config = vd.Config(model=vd.Model.VLP16, calibration=calibration)
if self.config.pcap and self.config.pcap.exists():
# self.sequence_generator = vd.read_pcap(self.config.pcap, config, time_range=[1761139039+100,None])
self.sequence_generator = vd.read_pcap(self.config.pcap, config, time_range=[1761139039+30,None])
# self.sequence_generator = vd.read_pcap(self.config.pcap, config)
else:
self.sequence_generator = read_live_data(self.config.ip, self.config.port, config)
self.tracker = BYTETracker(frame_rate=10)
self.tracks: DefaultDict[str, Track] = defaultdict(lambda: Track())
self.room_filter = VoxelOccupancyFilter(
voxel_size=0.3,
voxel_size=.5,
history_size=80,
history_interval=8, #not every frame needs to impact history
ready_for_action_at=25,
static_threshold=0.5
static_threshold=0.3
)
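With these settings a voxel counts as static once it appears in at least static_threshold of the stored history; a worked check of that ratio (values illustrative):

count, history_len = 30, 80
is_static = count / history_len >= 0.3  # 0.375 >= 0.3 -> True, points in this voxel get filtered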
def run(self):
@@ -70,14 +120,19 @@
# Set the point size
render_option = vis.get_render_option()
render_option.point_size = 1.0 # Smaller point size
render_option.background_color = np.array([0,0,0]) # Black background
# render_option.camera
view_ctl = vis.get_view_control()
view_ctl.set_lookat([0, 0, 0])
view_ctl.set_up([0, 1, 0])
front_direction = np.array([0.5, 0.5, -1.0])
front_direction = front_direction / np.linalg.norm(front_direction) # Normalize!
view_ctl.set_front(front_direction.tolist()) # Convert to list
view_ctl.set_zoom(0.9)
# front_direction = np.array([0,0,-1])
# front_direction = front_direction / np.linalg.norm(front_direction) # Normalize!
# view_ctl.set_front(front_direction.tolist()) # Convert to list
# view_ctl.set_lookat([0, 0, 0])
# view_ctl.set_up([0, 0, 1])
# view_ctl.set_zoom(1)
# view_ctl.set_front([0, 0, -1])
# view_ctl.set_up([1, 0, 0])
# view_ctl.set_zoom(1.5)
geom_added = False
@@ -87,32 +142,61 @@
live_filtered_pcd = o3d.geometry.PointCloud()
live_filtered_pcd.paint_uniform_color([0.0, 0.7, 0.2])
coordinate_indicator = o3d.geometry.TriangleMesh.create_coordinate_frame()
vis.add_geometry(coordinate_indicator, False)
ground_lines = o3d.geometry.LineSet(
o3d.utility.Vector3dVector([[0,0,0], [4,4,0], [4,-4,0], [-4,-4,0], [-4,4,0]]),
o3d.utility.Vector2iVector([[0,1], [0,2], [0,3], [0,4], [1,2], [2,3], [3,4], [4,1]])
)
ground_lines.paint_uniform_color([0,1,0])
vis.add_geometry(ground_lines, False)
ground_normal = np.array([1,1,0])
found_planes = False
transform_matrix = np.array([[0.8457653099655785, -0.4625540400071917, -0.26594134791689383, -6.176546342430921], [-0.4412543343066861, -0.886591089552013, 0.13874744099455633, 4.588060710249307], [-0.29995941877926136, -4.340500985936626e-17, -0.9539519626719197, 0.6514765464071752], [0.0, 0.0, 0.0, 1.0]])
# transform_matrix = np.array([[0.8393535624356702, -0.4568202093538991, -0.2946199137064719, 7.480984770326982], [-0.46710069069557, -0.8833469220220505, 0.03892505755796417, 1.2182001838057144], [-0.2780333468817559, 0.1049452794556715, -0.9548214211786936, -1.598860001424085], [0.0, 0.0, 0.0, 1.0]])
camera = UndistortedCamera()
frame_idx = 0
while self.run_loop():
frame_idx += 1
stamp, lidar_points = next(self.sequence_generator)
# print(len(lidar_points))
# print(stamp)
raw_frame_points = velodyne_to_points(lidar_points)
raw_frame_points = filter_raw_points_by_distance(raw_frame_points, np.array([0,0,0]), 15)
frame_points = velodyne_to_points(lidar_points)
current_pcd = o3d.geometry.PointCloud()
current_pcd.points = points_to_o3d(frame_points)
current_pcd.points = points_to_o3d(raw_frame_points)
R = current_pcd.get_rotation_matrix_from_xyz((0,0, np.pi / 4+.1))
current_pcd.rotate(R)
current_pcd.translate( [9.7, -11.14, 0])
current_pcd = flip_points_along_axis(current_pcd, 1)
# current_pcd.scale(np.array([1,-1, 1]), )
# current_pcd.transform(transform_matrix)
if not found_planes:
self.planes = segment_planes(current_pcd, 4, distance_threshold=.2)
found_planes = True
self.logger.info(f"Found {len(self.planes)} planes in points cloud")
# if not found_planes:
# self.planes = segment_planes(current_pcd, 4, distance_threshold=.2)
# found_planes = True
# self.logger.info(f"Found {len(self.planes)} planes in points cloud")
if self.config.viz:
for i, plane in enumerate(self.planes):
color = [0,0,0]
color[i%3] = 1
# color = [1*i/len(self.planes),0,0]
plane.paint_uniform_color(color)
print(f"Add plane {len(plane.points)} points")
vis.add_geometry(plane, True)
# if self.config.viz:
# for i, plane in enumerate(self.planes):
# color = [0,0,0]
# color[i%3] = 1
# # color = [1*i/len(self.planes),0,0]
# plane.paint_uniform_color(color)
# print(f"Add plane {len(plane.points)} points")
# vis.add_geometry(plane, True)
@@ -161,15 +245,73 @@
# boxes, centroids = get_cluster_boxes(points_2d, labels, min_area= 0.3*0.3)
boxes, centroids = get_cluster_boxes(points_2d, labels, min_area= self.config.min_box_area)
# append confidence and class (placeholders)
detections = np.ones((boxes.shape[0], boxes.shape[1] + 2))
detections[:,:-2] = boxes
# detections = np.array([
# # coords in xyxy
# box.tolist() + [1,1] for box in boxes
# ])
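# Shape sketch: with two boxes, `detections` becomes
#   [[x1, y1, x2, y2, 1.0, 1.0],
#    [x1, y1, x2, y2, 1.0, 1.0]]
# i.e. xyxy coordinates padded with placeholder confidence and class columns.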
online_tracks = self.tracker.update(detections)
self.logger.debug(f"online tracks: {[t[4] for t in online_tracks]}")
removed_tracks = self.tracker.removed_stracks
# active_stracks = [track for track in self.tracker.tracked_stracks if track.is_activated]
active_stracks = [track for track in self.tracker.tracked_stracks if track.is_activated]
detections = [Detection.from_bytetrack(track, frame_idx) for track in active_stracks]
for detection in detections:
track = self.tracks[detection.track_id]
track.track_id = detection.track_id # for new tracks
track.fps = 10
track.frame_index = frame_idx
track.updated_at = time.time()
# track.fps = self.config.camera.fps # for new tracks
track.history.append(detection) # add to history
for t in removed_tracks:
if t.track_id in self.tracks:
del self.tracks[t.track_id]
# TODO: fix this oddity:
# else:
# logger.debug(f"removing non-existing track? id: {t.track_id}")
# reset for each iteration
# self.tracker.removed_stracks = []
active_track_ids = [d.track_id for d in detections]
active_tracks = {t.track_id: t.get_with_interpolated_history() for t in self.tracks.values() if t.track_id in active_track_ids}
# active_tracks = displacement_filter.apply_to_dict(active_tracks, frame.camera)# a filter to remove just detecting static objects
# frame = Frame(frame_idx, None, time.time(), self.tracks, camera.H, camera)
frame = Frame(frame_idx, None, time.time(), active_tracks,
camera.H, camera)
if self.config.smooth_tracks:
frame = self.smoother.smooth_frame_tracks(frame)
self.track_sock.send_pyobj(frame)
if len(centroids):
self.logger.debug(f"{centroids=}")
if self.config.viz:
line_sets = boxes_to_lineset(boxes, 0)
for s in line_sets:
vis.add_geometry(s, False)
line_sets = []
for box in boxes:
line_sets.append(box_to_lineset(box, 0, [.3,.3,.3]))
for track in active_stracks:
color = RGB_COLORS[track.track_id % len(RGB_COLORS)]
line_sets.append(box_to_lineset(track.tlbr, 0, color))
for line_set in line_sets:
vis.add_geometry(line_set, False)
elif self.config.viz:
line_sets = []
live_filtered_pcd.points = o3d.utility.Vector3dVector(np.empty([0,3]))
@@ -196,13 +338,14 @@
if not geom_added:
vis.add_geometry(live_filtered_pcd, True)
# vis.add_geometry(live_pcd, True)
vis.add_geometry(live_pcd, True)
for s in spheres:
vis.add_geometry(s, False)
geom_added = True
else:
# vis.remove_geometry(live_pcd, False)
vis.update_geometry(live_pcd)
vis.update_geometry(live_filtered_pcd)
# vis.update_geometry(live_pcd)
for s in spheres:
vis.add_geometry(s, False)
@@ -233,6 +376,14 @@
help='Port of the incoming IP packets',
type=int,
default=2368)
argparser.add_argument('--pcap',
help='Read from a pcap file instead of a live socket',
type=Path,
default=None)
# argparser.add_argument('--pcap-offset',
# help='offset in seconds',
# type=int,
# default=None)
argparser.add_argument('--min-box-area',
help='Minimum area of bounding boxes to consider them for tracking',
type=float,
@@ -342,10 +493,13 @@ def get_cluster_boxes(points_xy, labels, min_area = 0):
boxes.append([x_min, y_min, x_max, y_max])
centroids.append(centroid)
if not len(boxes):
return np.empty([0,4]), np.empty([0,2])
return np.array(boxes), np.array(centroids)
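The per-cluster body is elided above; presumably each box is the axis-aligned extent of one DBSCAN cluster. A sketch under that assumption:

cluster = points_xy[labels == label]   # 2D points of one cluster
x_min, y_min = cluster.min(axis=0)
x_max, y_max = cluster.max(axis=0)
centroid = cluster.mean(axis=0)
# boxes below min_area are skipped before appending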
def boxes_to_lineset(boxes, z_height=0.0, color=[1.0, 0.0, 0.0]) -> List[o3d.geometry.LineSet]:
def box_to_lineset(box, z_height=0.0, color=[1.0, 0.0, 0.0]) -> o3d.geometry.LineSet:
"""Builds an Open3D LineSet for a single 2D bounding box.
Args:
@@ -354,8 +508,6 @@ def boxes_to_lineset(boxes, z_height=0.0, color=[1.0, 0.0, 0.0]) -> List[o3d.geo
z_height (float): The z-coordinate height of the box.
color (list): The color of the box (RGB).
"""
sets = []
for box in boxes:
x_min, y_min, x_max, y_max = box
# Define the 8 corners of the box in 3D
@@ -384,8 +536,7 @@ def boxes_to_lineset(boxes, z_height=0.0, color=[1.0, 0.0, 0.0]) -> List[o3d.geo
# Create a LineSet from the lines
line_set = o3d.geometry.LineSet(points, lines)
line_set.colors = o3d.utility.Vector3dVector(np.tile(color, (len(line_idxs), 1))) # Assign color
sets.append(line_set)
return sets
return line_set
@@ -432,6 +583,7 @@ class VoxelOccupancyFilter:
self.history_size = history_size
self.static_threshold = static_threshold
self.voxel_history = deque(maxlen=history_size)
self.static_voxels = {}
self.ready_for_action_at = ready_for_action_at if ready_for_action_at else history_size
@@ -461,30 +613,30 @@
"""
self.i = (self.i + 1) % self.history_interval
current_voxel_set = self._point_cloud_to_voxel_set(current_pcd)
# Count voxel occurrences in history
voxel_counts = {}
for voxel_set in self.voxel_history:
for v in voxel_set:
voxel_counts[v] = voxel_counts.get(v, 0) + 1
# Determine static voxels
static_voxels = {v for v, count in voxel_counts.items()
if count / len(self.voxel_history) >= self.static_threshold}
# Filter out static points from current point cloud
filtered_points = []
for pt in np.asarray(current_pcd.points):
voxel = tuple(np.floor(pt / self.voxel_size).astype(np.int32))
if voxel not in static_voxels:
if voxel not in self.static_voxels:
filtered_points.append(pt)
# Update history: every frame until full, then only every history_interval-th frame.
# This way a short history still suffices to encode passing participants, while
# semi-permanent changes to the space are absorbed over time.
if self.i == 0 or len(self.voxel_history) < self.history_size:
current_voxel_set = self._point_cloud_to_voxel_set(current_pcd)
self.voxel_history.append(current_voxel_set)
voxel_counts = {}
# Count voxel occurrences in history
for voxel_set in self.voxel_history:
for v in voxel_set:
voxel_counts[v] = voxel_counts.get(v, 0) + 1
# Determine static voxels
self.static_voxels = {v for v, count in voxel_counts.items()
if count / len(self.voxel_history) >= self.static_threshold}
if not self.initialised and len(self.voxel_history) >= self.ready_for_action_at:
logger.info(f"Static object filter ready: {len(self.voxel_history)} historical items, keep max {self.history_size}")
@@ -630,3 +782,42 @@ def project_to_plane(point_cloud, plane_normal: np.ndarray):
# projected_pc.points = o3d.utility.Vector3dVector(projected_points)
return projected_points
RGB_COLORS = [
(255, 0, 0),
(0, 255, 0),
# (0, 0, 255),# blue used for missing map
(0, 255, 255),
(255, 255, 0),
]
def flip_points_along_axis(pcd, axis):
"""
Flips the points of an Open3D PointCloud along a specified axis.
Args:
pcd (open3d.geometry.PointCloud): The input PointCloud.
axis (int): The axis to flip along (0 for X, 1 for Y, 2 for Z).
Returns:
open3d.geometry.PointCloud: The PointCloud with flipped points.
"""
points = np.asarray(pcd.points)
flipped_points = points.copy()
if axis == 0: # Flip along X-axis
flipped_points[:, 0] = -flipped_points[:, 0]
elif axis == 1: # Flip along Y-axis
flipped_points[:, 1] = -flipped_points[:, 1]
elif axis == 2: # Flip along Z-axis
flipped_points[:, 2] = -flipped_points[:, 2]
else:
raise ValueError("Invalid axis. Must be 0, 1, or 2.")
flipped_pcd = o3d.geometry.PointCloud()
flipped_pcd.points = o3d.utility.Vector3dVector(flipped_points)
return flipped_pcd
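Since the flip only negates one coordinate column, an equivalent compact variant (a sketch, likewise returning a copy rather than mutating the input):

pts = np.asarray(pcd.points).copy()
pts[:, axis] *= -1
flipped = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(pts))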

View file

@@ -96,8 +96,14 @@ class GigE(VideoSource):
# print('exposure time', self.camera.f.ExposureAutoMaxValue.Set(20000)) # shutter 1/50 (hence: 1000000/shutter)
print('exposure time', self.camera.f.ExposureAutoMaxValue.Set(60000)) # otherwise it becomes too blurry in movements
print('brightness target', self.camera.f.BrightnessAutoNominalValue.Get())
print('brightness target', self.camera.f.BrightnessAutoNominalValue.Set(35))
print('brightness target', self.camera.f.BrightnessAutoNominalValue.Set(value=35))
# print('brightness target', self.camera.f.Auto.Set(neoapi.BrightnessCorrection_On))
# print('brightness target', self.camera.f.BrightnessCorrection.Set(neoapi.BrightnessCorrection_On))
# print('brightness target', self.camera.f.BrightnessCorrection.Set(neoapi.BrightnessCorrection_On))
print('exposure time', self.camera.f.ExposureTime.Get())
print('LUTEnable', self.camera.f.LUTEnable.Get())
print('LUTEnable', self.camera.f.LUTEnable.Set(True))
# print('LUTEnable', self.camera.f.LUTEnable.Set(False))
print('Gamma', self.camera.f.Gamma.Set(0.45))
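# 0.45 ≈ 1/2.2, the usual display-gamma compensation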
# neoapi.region