Merge branch 'threaded'

commit 7cbb3c670d
Author: Ruben van de Ven
Date:   2025-08-06 21:41:19 +02:00


@@ -110,11 +110,11 @@ screenDrawCorners = np.array([[10,60], [90, 60], [10, 110], [90, 110]])
 metricsSize = [1920,1080]
 # metricsSize = [1280,800]
 # metricsSize = [960,600]
-metricsSize = [1080,1080] # no point in having it different from to the render size
+#metricsSize = [1080,1080] # no point in having it different from to the render size
 dataframe = pd.DataFrame(columns=['x','y'])
 renderSize = [1280,800]
-renderSize = [1080,1080]
+renderSize = [1920,1080]
 # Used to create a black backdrop, instead of the ugly Qt-gray, if neccessary
 screenSize = [1920,1080]
@@ -245,6 +245,12 @@ def create_perspective_transform(src, dst, round=False, splat_args=False):
     return res
 def coordinatesToSrc(coordinates):
+    return np.array([
+        [-17000, -3000],
+        [46000, -3000],
+        [-17000, 6000],
+        [46000, 6000]
+    ])
     return np.array([coordinates['tl'], coordinates['tr'],coordinates['bl'], coordinates['br']])
 # coordinates of the screen boundaries
@@ -327,6 +333,7 @@ def captureFacesPoints(i):
     currentPoint = None
     currentVectors = None
     currentPoints = []
+    translation_warning = False
     if len(dets) > 0:
@@ -370,6 +377,11 @@ def captureFacesPoints(i):
         dist_coeffs = np.zeros((4,1)) # Assuming no lens distortion
         (success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE)
+        translation_warning = False
+        if translation_vector[2] < 0:
+            translation_vector[2] *= -1
+            translation_warning = True
+            logger.critical("Inversed Z axis!")
         if not success:
             logger.info("Error determening PnP {}".format(success) )
@@ -404,6 +416,9 @@ def captureFacesPoints(i):
         # rz = np.arctan2(rotMatrix[0,1]/np.cos(ry), rotMatrix[0,0]/np.cos(ry))
         # logger.info("rotation ml {} {} {}".format(rx, ry, rz) )# seems better?
         viewDirectionVector = np.dot(np.array([0.0, 0.0, 100.0]), rotMatrix)
+        if translation_warning:
+            viewDirectionVector[0] *= -1
+            viewDirectionVector[1] *= -1
         if not args.hide_preview:
             # draw little floorplan for x: 10 -> 50 maps to z: 0 -> 10000, x: -2000 -> 2000
@@ -455,7 +470,7 @@ def captureFacesPoints(i):
     # TODO only draw nose line now, so we can change color depending whether on screen or not
-    results = {'currentPoint': currentPoint, 'currentPoints': currentPoints, 'currentVectors': currentVectors}
+    results = {'currentPoint': currentPoint, 'currentPoints': currentPoints, 'currentVectors': currentVectors, 'translation_warning': translation_warning}
     results['im'] = im if not args.hide_preview else None
     try:
@@ -512,6 +527,7 @@ if __name__ == '__main__':
             currentPoint = result['currentPoint']
             currentPoints = result['currentPoints']
             currentVectors = result['currentVectors']
+            translation_warning = result['translation_warning']
         except Queue.Empty as e:
             logger.warn('Result queue empty')
@@ -649,7 +665,7 @@ if __name__ == '__main__':
             normalisedMetricsColoredBGR,
             (metricsSize[0]/2,metricsSize[1]/2),
             tuple(targetPoint),
-            (255,0,0), 2
+            (255,0,0) if not translation_warning else (0,0,255), 2
         )
         # cv2.putText(normalisedMetricsColoredBGR, "z: {}".format(currentPoint[2]), (10,70), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255))
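For reference, a minimal standalone sketch of the guard this commit introduces around cv2.solvePnP: flip the Z component of the translation when it comes out negative, flag it, and mirror the X/Y of the projected view direction. This is an illustration under assumptions, not the repository's code: model_points, image_points and camera_matrix are placeholder inputs, estimate_gaze_direction is a hypothetical wrapper, and rotMatrix is derived here with cv2.Rodrigues, whereas the script computes it elsewhere in captureFacesPoints.

import logging

import cv2
import numpy as np

logger = logging.getLogger(__name__)

def estimate_gaze_direction(model_points, image_points, camera_matrix):
    # Assuming no lens distortion, as in the script
    dist_coeffs = np.zeros((4, 1))
    (success, rotation_vector, translation_vector) = cv2.solvePnP(
        model_points, image_points, camera_matrix, dist_coeffs,
        flags=cv2.SOLVEPNP_ITERATIVE)

    # solvePnP can return a mirrored solution that places the head behind the
    # camera; flip the Z translation and remember that it happened
    translation_warning = False
    if translation_vector[2] < 0:
        translation_vector[2] *= -1
        translation_warning = True
        logger.critical("Inversed Z axis!")

    # Project a forward-pointing vector through the head rotation
    # (assumption: rotMatrix from Rodrigues; the script derives it differently)
    rotMatrix, _ = cv2.Rodrigues(rotation_vector)
    viewDirectionVector = np.dot(np.array([0.0, 0.0, 100.0]), rotMatrix)
    if translation_warning:
        # Mirror X/Y so the drawn gaze line stays on the correct side
        viewDirectionVector[0] *= -1
        viewDirectionVector[1] *= -1

    return success, translation_vector, viewDirectionVector, translation_warning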