Add arguments & enable saving of frame
parent a40478f282
commit 35c53870da

2 changed files with 168 additions and 89 deletions

head_pose.py (128)
@@ -20,12 +20,57 @@ if sys.version_info[0] < 3:
else:
    import tkinter as Tk
import time
import datetime

import coloredlogs
import argparse

argParser = argparse.ArgumentParser(description='Draw a heatmap')
argParser.add_argument(
    '--camera',
    '-c',
    default=0,
    type=int,
    help='The id of the camera'
)
argParser.add_argument(
    '--verbose',
    '-v',
    action="store_true",
)

argParser.add_argument(
    '--hide-graph',
    action="store_true",
)

argParser.add_argument(
    '--hide-preview',
    action="store_true",
)
argParser.add_argument(
    '--output-dir',
    '-o',
    help="directory in which to store every x files",
)
argParser.add_argument(
    '--save-interval',
    type=int,
    default=15,
    help="Interval at which to save heatmap frames (in seconds)"
)

args = argParser.parse_args()

coloredlogs.install(
    level=logging.DEBUG if args.verbose else logging.INFO,
    # format='%(asctime)-15s %(name)s %(levelname)s: %(message)s'
)

logging.basicConfig( format='%(asctime)-15s %(name)s %(levelname)s: %(message)s' )
logger = logging.getLogger(__name__)

# Read Image
c = cv2.VideoCapture(0)
c = cv2.VideoCapture(args.camera)
# im = cv2.imread("headPose.jpg");
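A note beside the diff: argparse turns the dashed flag names above into underscored attributes, which is why later hunks read args.hide_graph, args.hide_preview, args.output_dir and args.save_interval. A minimal sketch of that behaviour (the argument values here are only an example):

import argparse

parser = argparse.ArgumentParser(description='Draw a heatmap')
parser.add_argument('--camera', '-c', default=0, type=int)
parser.add_argument('--hide-preview', action="store_true")
parser.add_argument('--output-dir', '-o')
parser.add_argument('--save-interval', type=int, default=15)

# Parse an example command line instead of sys.argv.
args = parser.parse_args(['-c', '1', '--hide-preview', '-o', 'output'])
print(args.camera)         # 1, parsed as int
print(args.hide_preview)   # True; dashes become underscores
print(args.output_dir)     # 'output'
print(args.save_interval)  # 15, the default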
@@ -63,7 +108,7 @@ def create_perspective_transform_matrix(src, dst):
    B = np.array(dst).reshape(8)
    af = np.dot(np.linalg.inv(A.T * A) * A.T, B)
    m = np.append(np.array(af).reshape(8), 1).reshape((3, 3))
    logger.info("Created transformmatrix: src %s dst %s m %s", src, dst, m)
    logger.info("Created transformmatrix: src {} dst {} m {}".format( src, dst, m))
    return m

# got this amazing thing from here: https://stackoverflow.com/a/24088499
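An aside, not part of the commit: the 3x3 matrix that create_perspective_transform_matrix returns is an ordinary homography, so applying it to a 2D point means multiplying in homogeneous coordinates and dividing by the third component. A minimal sketch, assuming m is such a matrix:

import numpy as np

def apply_perspective_transform(m, point):
    # Lift (x, y) to homogeneous coordinates, multiply, then divide by w.
    x, y, w = np.dot(m, [point[0], point[1], 1.0])
    return (x / w, y / w)

# With the identity matrix every point maps onto itself.
print(apply_perspective_transform(np.eye(3), (120.0, 45.0)))  # (120.0, 45.0)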
@@ -158,6 +203,7 @@ else:
coordinates = {'tl': None, 'tr': None, 'bl': None, 'br': None}
transform = None

if not args.hide_graph:
    windowRoot = Tk.Toplevel()
    windowSize = (1000,1000)
    windowRoot.geometry('%dx%d+%d+%d' % (windowSize[0],windowSize[1],0,0))
@@ -169,14 +215,22 @@ axes.set_xlabel('X axis label')
axes.set_ylabel('Y label')
# canvas = Tk.Canvas(windowRoot,width=1000,height=1000)
canvas = FigureCanvasTkAgg(figure,master=windowRoot)
canvas.show()
canvas.draw()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)

imageWindowRoot = Tk.Toplevel()
imageWindowSize = (1000,1000)
imageWindowRoot.geometry('%dx%d+%d+%d' % (imageWindowSize[0],imageWindowSize[1],0,0))
imageWindowRoot.attributes("-fullscreen", True)
# imageCanvas is where the heatmap image is drawn
imageCanvas = Tk.Canvas(imageWindowRoot,width=1000,height=1000)
imageCanvas.pack()
imageWindowRoot.lift()

if args.output_dir:
    startTime = time.time()
    lastSaveTime = startTime


while True:
    t1 = time.time()
@@ -233,17 +287,17 @@ while True:
    [0, 0, 1]], dtype = "double"
)

# print ("Camera Matrix :\n {0}".format(camera_matrix))
# logger.info ("Camera Matrix :\n {0}".format(camera_matrix))

dist_coeffs = np.zeros((4,1)) # Assuming no lens distortion
(success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE)

if not success:
    print("Error determening PnP", success)
    logger.info("Error determening PnP {}".format(success) )
    continue

logger.debug ("Rotation Vector:\n %s", rotation_vector)
print ("Translation Vector:\n {0}".format(translation_vector))
logger.info ("Translation Vector:\n {0}".format(translation_vector))

# Project a 3D point (0, 0, 1000.0) onto the image plane.
# We use this to draw a line sticking out of the nose
@@ -260,15 +314,18 @@ while True:
cv2.Rodrigues(rotation_vector, rotMatrix, jacobian=0)

# Find rotation: https://stackoverflow.com/a/15029416
rx = np.arctan2(rotMatrix[2,1], rotMatrix[2,2])
ry = np.arctan2(-rotMatrix[2,0], np.sqrt(np.square(rotMatrix[2,1]) + np.square(rotMatrix[2,2])))
rz = np.arctan2(rotMatrix[1,0],rotMatrix[0,0])
print("rotation", rx, ry, rz)
ry = - np.arcsin(rotMatrix[0,2])
rx = np.arctan2(rotMatrix[1,2]/np.cos(ry), rotMatrix[2,2]/np.cos(ry))
rz = np.arctan2(rotMatrix[0,1]/np.cos(ry), rotMatrix[0,0]/np.cos(ry))
print("rotation ml", rx, ry, rz) # seems better?
# not used anymore :-)
# rx = np.arctan2(rotMatrix[2,1], rotMatrix[2,2])
# ry = np.arctan2(-rotMatrix[2,0], np.sqrt(np.square(rotMatrix[2,1]) + np.square(rotMatrix[2,2])))
# rz = np.arctan2(rotMatrix[1,0],rotMatrix[0,0])
# logger.info("rotation {} {} {}".format(rx, ry, rz) )
# ry = - np.arcsin(rotMatrix[0,2])
# rx = np.arctan2(rotMatrix[1,2]/np.cos(ry), rotMatrix[2,2]/np.cos(ry))
# rz = np.arctan2(rotMatrix[0,1]/np.cos(ry), rotMatrix[0,0]/np.cos(ry))
# logger.info("rotation ml {} {} {}".format(rx, ry, rz) )# seems better?
viewDirectionVector = np.dot(np.array([0.0, 0.0, 1000.0]), rotMatrix)

if not args.hide_preview:
    # draw little floorplan for x: 10 -> 50 maps to z: 0 -> 10000, x: -2000 -> 2000
    mapPosX = int((translation_vector[0] + 500) / 1000 * 40)
    mapPosY = int((translation_vector[1] + 500) / 1000 * 40)
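A standalone sanity check, not part of the commit: the arctan2 formulas printed as "rotation" read Euler angles back out of a matrix composed as R = Rz · Ry · Rx. Assuming that composition order, a small round-trip confirms the decomposition:

import numpy as np

def rot_zyx(rz, ry, rx):
    # Compose R = Rz @ Ry @ Rx from the three elemental rotations.
    cz, sz = np.cos(rz), np.sin(rz)
    cy, sy = np.cos(ry), np.sin(ry)
    cx, sx = np.cos(rx), np.sin(rx)
    Rz = np.array([[cz, -sz, 0], [sz, cz, 0], [0, 0, 1]])
    Ry = np.array([[cy, 0, sy], [0, 1, 0], [-sy, 0, cy]])
    Rx = np.array([[1, 0, 0], [0, cx, -sx], [0, sx, cx]])
    return Rz @ Ry @ Rx

R = rot_zyx(0.3, -0.2, 0.1)
# Same formulas as the "rotation" decomposition in the hunk above:
rx = np.arctan2(R[2, 1], R[2, 2])
ry = np.arctan2(-R[2, 0], np.sqrt(np.square(R[2, 1]) + np.square(R[2, 2])))
rz = np.arctan2(R[1, 0], R[0, 0])
print(rx, ry, rz)  # approximately 0.1, -0.2, 0.3: the input angles come back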
@@ -284,7 +341,6 @@ while True:
# draw rotation vector
cv2.circle(im, (mapPosZ + 60, mapPosY + 10), 2, (0,0,255), -1)

viewDirectionVector = np.dot(np.array([0.0, 0.0, 1000.0]), rotMatrix)
cv2.line(im, (mapPosZ + 10, mapPosX + 10), (mapPosZ + 10 + int(viewDirectionVector[2] * 100), mapPosX + 10 + int(viewDirectionVector[0] * 100)), (255,255,0), 1)
cv2.line(im, (mapPosZ + 60, mapPosY + 10), (mapPosZ + 60 + int(viewDirectionVector[2] * 100), mapPosY + 10 - int(viewDirectionVector[1] * 100)), (255,0,255), 1)
@@ -318,6 +374,8 @@ while True:

# processed all faces, now draw on screen:
te1 = time.time()

if not args.hide_preview:
    # draw little floorplan for 10 -> 50, sideplan 60 -> 100 (40x40 px)
    cv2.rectangle(im, (9, 9), (51, 51), (255,255,255), 1)
    cv2.rectangle(im, (59, 9), (101, 51), (255,255,255), 1)
@@ -328,6 +386,7 @@ while True:
    cv2.rectangle(im, (9, 59), (91, 111), (255,255,255), 1)

if transform is None:
    if not args.hide_preview:
        cv2.putText(im, "1", (10,70), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255) if coordinates['tl'] is not None else (0,0,255))
        cv2.putText(im, "2", (85,70), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255) if coordinates['tr'] is not None else (0,0,255))
        cv2.putText(im, "3", (10,110), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255) if coordinates['bl'] is not None else (0,0,255))
@@ -346,9 +405,9 @@ while True:
# dot2 = np.dot(coordinates['bl'] - point, coordinates['tl'] - coordinates['br'])
# pointIn3 = [point[0], point[1], 0]
# targetPoint = np.dot(pointIn3, transformationMatrix)
# print("Looking at", pointIn3, np.dot( transformationMatrix, pointIn3))
# logger.info("Looking at", pointIn3, np.dot( transformationMatrix, pointIn3))
targetPoint = transform(point)
print("Looking at", point, targetPoint)
logger.info("Looking at {} {}".format(point, targetPoint) )
# cv2.circle(im, (int(targetPoint[0]), int(targetPoint[1])), 2, (0,255,0), -1)
# from 1920x1080 to 80x50
miniTargetPoint = (int(targetPoint[0] / 1920 * 80 + 10), int(targetPoint[1] / 1080 * 50 + 60))
@@ -356,9 +415,11 @@ while True:
targetInt = (int(targetPoint[0]), int(targetPoint[1]))
# check if point fits on screen:
# if so, measure it
if targetInt[0] >= 0 and targetInt[1] >= 0 and targetInt[0] < metricsSize[1] and targetInt[1] < metricsSize[0]:
if targetInt[0] >= 0 and targetInt[1] >= 0 and targetInt[0] < metricsSize[0] and targetInt[1] < metricsSize[1]:
    dataframe = dataframe.append({'x':targetInt[0],'y':targetInt[1]}, ignore_index=True)
    logger.debug("Put metric {},{} in metrix of {},{}".format(targetInt[1],targetInt[0], metricsSize[1], metricsSize[0]))
    newMetrics[targetInt[1],targetInt[0]] += 1

# after we collected all new metrics, blur them foor smoothness
# and add to all metrics collected
tm3 = time.time()
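For clarity, an illustration that is not in the commit and assumes newMetrics has shape (height, width): a pixel (x, y) is written as newMetrics[y, x], so x has to stay below the width and y below the height, which is what the corrected bounds check above enforces under that assumption:

import numpy as np

height, width = 50, 80                 # assumed (rows, cols) layout of the metrics grid
newMetrics = np.zeros((height, width))

x, y = 79, 10                          # example target in pixel coordinates
if 0 <= x < width and 0 <= y < height:
    newMetrics[y, x] += 1              # row index is y, column index is x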
@@ -369,6 +430,7 @@ while True:
# Display webcam image with overlays
te2 = time.time()
logger.debug("Drew on screen in %fs", te2-te1)
if not args.hide_preview:
    cv2.imshow("Output", im)
te3 = time.time()
logger.debug("showed webcam image in %fs", te3-te2)
@@ -383,7 +445,7 @@ while True:
normalisedMetrics = np.uint8(cm.plasma(normalisedMetrics)*255)
tm22 = time.time()
logger.debug("Max normalised metrics: %f", np.max(normalisedMetrics))
# print(normalisedMetrics)
# logger.info(normalisedMetrics)
tm23 = time.time()
image = Image.fromarray(normalisedMetrics)
wpercent = (imageWindowSize[0] / float(image.size[0]))
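For context, not part of the diff: matplotlib's cm.plasma maps values in [0, 1] to RGBA floats, so scaling by 255 and casting to uint8 yields an array that Image.fromarray accepts, which is the pipeline the hunk above uses. A minimal sketch:

import numpy as np
from matplotlib import cm
from PIL import Image

values = np.random.rand(50, 80)            # stand-in for the normalised metrics, in [0, 1]
rgba = np.uint8(cm.plasma(values) * 255)   # shape (50, 80, 4), dtype uint8
heatmap = Image.fromarray(rgba)            # RGBA PIL image, ready to hand to Tk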
@@ -394,9 +456,10 @@ while True:
imagesprite = imageCanvas.create_image(500,500,image=tkpi, tags="IMG")
imageWindowRoot.update()
tm24 = time.time()
logger.debug("PIL iamge generated in %fs", tm24 - tm23)
logger.debug("PIL image generated in %fs", tm24 - tm23)
logger.debug("Total matrix time is %fs", tm4 - tm3 + tm2 - tm1 + tm24 - tm21)

if not args.hide_graph:
    te4 = time.time()
    axes.clear()
    if(len(dataframe) > 2):
@@ -406,6 +469,20 @@ while True:
te5 = time.time()
logger.debug("Drew graph & updated window in %fs", te5-te4)

if args.output_dir:
    # save output to dir
    now = tm24 # time.time()
    if now - lastSaveTime > args.save_interval:
        filename = os.path.join(
            args.output_dir,
            "frame{}.png".format(
                datetime.datetime.now().replace(microsecond=0).isoformat()
            )
        )
        image.save(filename)
        lastSaveTime = now
    pass

# (optionally) very slowly fade out previous metrics:
# metrics = metrics * .999
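The save block above is a plain time-based throttle: at most one frame is written per save interval, named after the current timestamp. A standalone sketch of the same pattern (function and variable names here are illustrative, not from the commit):

import datetime
import os
import time

def maybe_save(image, output_dir, last_save_time, save_interval):
    # Write at most one frame per save_interval seconds; return the updated timestamp.
    now = time.time()
    if now - last_save_time > save_interval:
        stamp = datetime.datetime.now().replace(microsecond=0).isoformat()
        image.save(os.path.join(output_dir, "frame{}.png".format(stamp)))
        return now
    return last_save_time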
@@ -430,10 +507,10 @@ while True:
    coordinates['br'] = currentPoint
    recalculate = True
elif keyPress == ord('t') and transform is not None:
    print("Coordinates", coordinates)
    print("Drawing area", screenDrawCorners)
    print("Test point %s", currentPoint )
    print("Transformed point %s", transform(currentPoint))
    logger.info("Coordinates {}".format(coordinates) )
    logger.info("Drawing area {}".format(screenDrawCorners))
    logger.info("Test point {}".format(currentPoint ))
    logger.info("Transformed point {}".format(transform(currentPoint)))


if recalculate is True and not any (x is None for x in coordinates.values()):
@@ -446,4 +523,3 @@ while True:


cv2.destroyAllWindows()
output/.gitignore (3, vendored, new file)
@@ -0,0 +1,3 @@
*
!.gitignore