from dataclasses import dataclass
from itertools import cycle
import json
import logging
import math
from os import PathLike
from pathlib import Path
import time
from typing import Any, Generator, Iterable, List, Literal, Optional, Tuple

import neoapi
import cv2
import numpy as np

from trap.base import Camera, UrlOrPath

logger = logging.getLogger('video_source')


class VideoSource:
    """Base class for video frame generators."""

    def recv(self) -> Generator[Optional[cv2.typing.MatLike], Any, None]:
        raise NotImplementedError("recv() must be implemented by subclasses")

    def __iter__(self):
        for i in self.recv():
            yield i


BinningValue = Literal[1, 2]
Coordinate = Tuple[int, int]


@dataclass
class GigEConfig:
    identifier: Optional[str] = None
    binning_h: BinningValue = 1
    binning_v: BinningValue = 1
    pixel_format: int = neoapi.PixelFormat_BayerRG8

    # when changing these values, make sure you also tweak the calibration
    width: int = 2448
    height: int = 2048

    # changing these _automatically changes calibration cx and cy_!!
    offset_x: int = 0
    offset_y: int = 0

    post_crop_tl: Optional[Coordinate] = None
    post_crop_br: Optional[Coordinate] = None

    @classmethod
    def from_file(cls, file: PathLike):
        with open(file, 'r') as fp:
            return cls(**json.load(fp))
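
# A minimal sketch of the JSON that GigEConfig.from_file() can load: keys mirror the
# dataclass fields above, omitted keys fall back to their defaults, and the values shown
# here are illustrative assumptions rather than a tested configuration.
#
#   {
#       "identifier": "camera-serial-or-ip",
#       "binning_h": 1,
#       "binning_v": 1,
#       "width": 2448,
#       "height": 2048,
#       "offset_x": 0,
#       "offset_y": 0,
#       "post_crop_tl": [0, 100],
#       "post_crop_br": [2448, 1900]
#   }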


class GigE(VideoSource):
    """Frame source for a GigE camera accessed through Baumer's neoAPI."""

    def __init__(self, config: GigEConfig):
        self.config = config

        self.camera = neoapi.Cam()
        # self.camera.Connect('-B127')
        self.camera.Connect(self.config.identifier)
        # Default buffer mode, streaming, always returns latest frame
        self.camera.SetImageBufferCount(10)
        # neoAPI docs: setting neoapi.Cam.SetImageBufferCycleCount() to one ensures that all
        # buffers but one are given back to the neoAPI to be re-cycled and are never handed to
        # the user by the neoapi.Cam.GetImage() method.
        self.camera.SetImageBufferCycleCount(1)
        self.setPixelFormat(self.config.pixel_format)

        self.cam_is_configured = False

        self.converter_settings = neoapi.ConverterSettings()
        self.converter_settings.SetDebayerFormat('BGR8')  # OpenCV's channel order
        self.converter_settings.SetDemosaicingMethod(neoapi.ConverterSettings.Demosaicing_Baumer5x5)
        # self.converter_settings.SetSharpeningMode(neoapi.ConverterSettings.Sharpening_Global)
        # self.converter_settings.SetSharpeningMode(neoapi.ConverterSettings.Sharpening_Adaptive)
        # self.converter_settings.SetSharpeningMode(neoapi.ConverterSettings.Sharpening_ActiveNoiseReduction)
        self.converter_settings.SetSharpeningMode(neoapi.ConverterSettings.Sharpening_Off)
        self.converter_settings.SetSharpeningFactor(1)
        self.converter_settings.SetSharpeningSensitivityThreshold(2)

    def configCam(self):
        if self.camera.IsConnected():
            self.setPixelFormat(self.config.pixel_format)

            # self.camera.f.PixelFormat.Set(neoapi.PixelFormat_RGB8)
            self.camera.f.BinningHorizontal.Set(self.config.binning_h)
            self.camera.f.BinningVertical.Set(self.config.binning_v)
            self.camera.f.Height.Set(self.config.height)
            self.camera.f.Width.Set(self.config.width)
            self.camera.f.OffsetX.Set(self.config.offset_x)
            self.camera.f.OffsetY.Set(self.config.offset_y)

            # print('exposure time', self.camera.f.ExposureAutoMaxValue.Set(20000))  # shutter 1/50 (i.e. 1000000/shutter, in µs)
            print('exposure time', self.camera.f.ExposureAutoMaxValue.Set(60000))  # otherwise it becomes too blurry with movement
            print('brightness target', self.camera.f.BrightnessAutoNominalValue.Get())
            print('brightness target', self.camera.f.BrightnessAutoNominalValue.Set(value=35))
            # print('brightness correction', self.camera.f.Auto.Set(neoapi.BrightnessCorrection_On))
            # print('brightness correction', self.camera.f.BrightnessCorrection.Set(neoapi.BrightnessCorrection_On))
            print('exposure time', self.camera.f.ExposureTime.Get())
            print('LUTEnable', self.camera.f.LUTEnable.Get())
            print('LUTEnable', self.camera.f.LUTEnable.Set(True))
            # print('LUTEnable', self.camera.f.LUTEnable.Set(False))
            print('Gamma', self.camera.f.Gamma.Set(0.45))

            # print('LUT', self.camera.f.LUTIndex.Get())
            # print('LUT', self.camera.f.LUTEnable.Get())
            # print('exposure time max', self.camera.f.ExposureTimeGapMax.Get())
            # print('exposure time min', self.camera.f.ExposureTimeGapMin.Get())
            # self.pixfmt = self.camera.f.PixelFormat.Get()

        self.cam_is_configured = True

    def setPixelFormat(self, pixfmt):
        self.pixfmt = pixfmt
        self.camera.f.PixelFormat.Set(pixfmt)
        # self.pixfmt = self.camera.f.PixelFormat.Get()

    def recv(self):
        while True:
            if not self.camera.IsConnected():
                self.cam_is_configured = False
                return

            if not self.cam_is_configured:
                self.configCam()

            i = self.camera.GetImage(0)
            if i.IsEmpty():
                time.sleep(.01)
                continue

            # print(i.GetAvailablePixelFormats())
            i = i.Convert(self.converter_settings)

            if i.IsEmpty():
                time.sleep(.01)
                continue

            img = i.GetNPArray()

            # imgarray = i.GetNPArray()
            # if self.pixfmt == neoapi.PixelFormat_BayerRG12:
            #     img = cv2.cvtColor(imgarray, cv2.COLOR_BayerRG2RGB)
            # elif self.pixfmt == neoapi.PixelFormat_BayerRG8:
            #     img = cv2.cvtColor(imgarray, cv2.COLOR_BayerRG2RGB)
            # else:
            #     img = cv2.cvtColor(imgarray, cv2.COLOR_BGR2RGB)

            # if img.dtype == np.uint16:
            #     img = cv2.convertScaleAbs(img, alpha=(255.0/65535.0))

            img = self._crop(img)
            yield img

    def _crop(self, img):
        """Crop to the configured post_crop region; defaults to the full frame."""
        tl = self.config.post_crop_tl or (0, 0)
        br = self.config.post_crop_br or (img.shape[1], img.shape[0])

        return img[tl[1]:br[1], tl[0]:br[0], :]
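
# A usage sketch (not exercised here): it assumes a reachable GigE camera and a config
# file at the hypothetical path "gige_config.json".
#
#   config = GigEConfig.from_file("gige_config.json")
#   for img in GigE(config):
#       cv2.imshow("gige", img)
#       if cv2.waitKey(1) == ord('q'):
#           break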


class SingleCvVideoSource(VideoSource):
    def recv(self):
        while True:
            ret, img = self.video.read()
            self.frame_idx += 1

            # stop when the capture runs out of frames (or the stream drops)
            if not ret:
                break

            # frame = Frame(index=self.n, img=img, H=self.camera.H, camera=self.camera)
            yield img


class RtspSource(SingleCvVideoSource):
    def __init__(self, video_url: str | Path, camera: Optional[Camera] = None):
        # Keep at most 1 frame in the app buffer (0 = unlimited).
        # With GStreamer 1.28, drop=true is deprecated; use leaky-type=2 to choose which frame to drop:
        # https://gstreamer.freedesktop.org/documentation/applib/gstappsrc.html?gi-language=c
        gst = f"rtspsrc location={video_url} latency=0 buffer-mode=auto ! decodebin ! videoconvert ! appsink max-buffers=1 drop=true"
        logger.info(f"Capture gstreamer (gst-launch-1.0): {gst}")
        self.video = cv2.VideoCapture(gst, cv2.CAP_GSTREAMER)
        self.frame_idx = 0
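
# A usage sketch with a hypothetical stream URL (requires OpenCV built with GStreamer support):
#
#   source = RtspSource("rtsp://example.local/stream1")
#   for img in source:
#       ...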


class FilelistSource(SingleCvVideoSource):
    def __init__(self, video_sources: Iterable[UrlOrPath], camera: Optional[Camera] = None, delay=True, offset=0, end: Optional[int] = None, loop=False):
        # keep track of the current playback position across files
        self.video_sources = video_sources if not loop else cycle(video_sources)
        self.camera = camera
        self.video_path = None
        self.video_nr = None
        self.frame_count = None
        self.frame_idx = None
        self.n = 0
        self.delay_generation = delay
        self.offset = offset
        self.end = end

    def recv(self):
        prev_time = time.time()

        for video_nr, video_path in enumerate(self.video_sources):
            self.video_path = video_path
            self.video_nr = video_nr
            logger.info(f"Play from '{str(video_path)}'")
            video = cv2.VideoCapture(str(video_path))
            fps = video.get(cv2.CAP_PROP_FPS)
            target_frame_duration = 1. / fps
            self.frame_count = video.get(cv2.CAP_PROP_FRAME_COUNT)
            if self.frame_count < 0:
                self.frame_count = math.inf
            self.frame_idx = 0
            # TODO)) Video offset
            if self.offset:
                logger.info(f"Start at frame {self.offset}")
                video.set(cv2.CAP_PROP_POS_FRAMES, self.offset)
                self.frame_idx = self.offset

            while True:
                ret, img = video.read()
                self.frame_idx += 1
                self.n += 1

                # current file finished: move on to the next source in the list
                if not ret:
                    break

                if "DATASETS/hof/" in str(video_path):
                    # hack to mask out an area
                    cv2.rectangle(img, (0, 0), (800, 200), (0, 0, 0), -1)

                # frame = Frame(index=self.n, img=img, H=self.camera.H, camera=self.camera)
                yield img

                if self.end is not None and self.frame_idx >= self.end:
                    logger.info(f"Reached frame {self.end}")
                    break

                if self.delay_generation:
                    # pace the generator to (roughly) the source frame rate
                    now = time.time()
                    time_diff = (now - prev_time)
                    if time_diff < target_frame_duration:
                        time.sleep(target_frame_duration - time_diff)
                        now += target_frame_duration - time_diff

                    prev_time = now
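
# A usage sketch with hypothetical file paths: plays the files back to back at their native
# frame rate, starting 100 frames in, and loops over the list indefinitely.
#
#   files = [UrlOrPath("videos/a.mp4"), UrlOrPath("videos/b.mp4")]
#   source = FilelistSource(files, offset=100, loop=True)
#   for img in source:
#       ...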


class CameraSource(SingleCvVideoSource):
    def __init__(self, identifier: int, camera: Camera):
        self.video = cv2.VideoCapture(identifier)
        self.camera = camera

        # TODO: make config variables
        self.video.set(cv2.CAP_PROP_FRAME_WIDTH, int(self.camera.w))
        self.video.set(cv2.CAP_PROP_FRAME_HEIGHT, int(self.camera.h))
        # print("exposure!", video.get(cv2.CAP_PROP_AUTO_EXPOSURE))
        self.video.set(cv2.CAP_PROP_FPS, self.camera.fps)
        self.frame_idx = 0


def get_video_source(video_sources: List[UrlOrPath], camera: Optional[Camera] = None, frame_offset=0, frame_end: Optional[int] = None, loop=False):
    if str(video_sources[0]).isdigit():
        # numeric input is a CV camera
        if frame_offset:
            logger.info("video-offset ignored for camera source")
        return CameraSource(int(str(video_sources[0])), camera)
    elif video_sources[0].url.scheme == 'rtsp':
        # video_sources[0].url.hostname
        if frame_offset:
            logger.info("video-offset ignored for rtsp source")
        return RtspSource(video_sources[0])
    elif video_sources[0].url.scheme == 'gige':
        if frame_offset:
            logger.info("video-offset ignored for gige source")
        config = GigEConfig.from_file(Path(video_sources[0].url.netloc + video_sources[0].url.path))
        return GigE(config)
    else:
        return FilelistSource(video_sources, offset=frame_offset, end=frame_end, loop=loop)
    # os.environ["OPENCV_FFMPEG_CAPTURE_OPTIONS"] = "fflags;nobuffer|flags;low_delay|avioflags;direct|rtsp_transport;udp"


def get_video_source_from_str(video_sources: List[str]):
    paths = [UrlOrPath(s) for s in video_sources]
    return get_video_source(paths)
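

# A minimal manual test, assuming the module is run directly with one or more source
# arguments (a device index, an rtsp:// or gige:// URL, or video file paths). This is a
# sketch for previewing frames, not part of the module's public interface.
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="Preview frames from a video source")
    parser.add_argument("sources", nargs="+", help="device index, rtsp://, gige:// or file path(s)")
    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO)
    source = get_video_source_from_str(args.sources)
    for img in source:
        cv2.imshow("video_source", img)
        if cv2.waitKey(1) == ord('q'):
            break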