@@ -76,6 +76,11 @@ argParser.add_argument(
    default=4,
    help="Number of total processes (min 3)"
)
argParser.add_argument(
    '--only-metrics',
    action="store_true",
    help="Render only metrics instead of the heatmap. Convenient for debugging."
)

args = argParser.parse_args()
@@ -88,10 +93,7 @@ logger = logging.getLogger(__name__)

# im = cv2.imread("headPose.jpg");

spotSize = (100,100)
spot = Image.open(os.path.join(cur_dir,"spot.png")).convert('L')
spot = spot.resize(spotSize)
spot = np.array(spot)

predictor_path = os.path.join(cur_dir,"shape_predictor_68_face_landmarks.dat")
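
A minimal sketch of a synthetic stand-in for spot.png, assuming the asset is roughly a radial falloff stamp (make_spot is a hypothetical helper, not part of this code):

import numpy as np

def make_spot(size=100, sigma=0.35):
    # radial Gaussian on [-1,1]^2, scaled to uint8 like the PNG-derived array
    ax = np.linspace(-1, 1, size)
    xx, yy = np.meshgrid(ax, ax)
    return np.uint8(255 * np.exp(-(xx**2 + yy**2) / (2 * sigma**2)))

spot = make_spot(100)  # same shape/dtype as the PNG-derived array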
@@ -106,11 +108,28 @@ screenDrawCorners = np.array([[10,60], [90, 60], [10, 110], [90, 110]])

# metrics matrix
metricsSize = [1920,1080]
metricsSize = [1280,800]
metricsSize = [960,600]
# metricsSize = [1280,800]
# metricsSize = [960,600]
metricsSize = [1080,1080] # no point in having it different from the render size
dataframe = pd.DataFrame(columns=['x','y'])

renderSize = [1280,800]
renderSize = [1080,1080]

# Used to create a black backdrop, instead of the ugly Qt-gray, if necessary
screenSize = [1920,1080]

spotS = int(100./720*renderSize[1])
spotSize = (spotS, spotS)

spot = Image.open(os.path.join(cur_dir,"spot.png")).convert('L')
spot = spot.resize(spotSize)
spot = np.array(spot)

backdrop = None
if screenSize != renderSize:
    shape = [screenSize[1],screenSize[0], 3]
    backdrop = np.zeros(shape, dtype=np.uint8)

metrics = None
if lastMetricsFilename and os.path.isfile(lastMetricsFilename):
@@ -231,7 +250,7 @@ def coordinatesToSrc(coordinates):

# coordinates of the screen boundaries
if os.path.exists("coordinates.p"):
    coordinates = pickle.load(open("coordinates.p", "rb"))
    transform = create_perspective_transform(coordinatesToSrc(coordinates), screenDrawCorners)
    transform = create_perspective_transform(coordinatesToSrc(coordinates), screenDrawCorners, True)

    a = [np.array([ 1312.15541183]), np.array([ 244.56278002]), 0]
    logger.info("Loaded coordinates: %s", coordinatesToSrc(coordinates))
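
For orientation: create_perspective_transform maps the four calibrated gaze-plane corners onto screenDrawCorners. A minimal sketch of the same mapping using OpenCV's own homography helpers (an assumption for illustration; the repo uses its local create_perspective_transform):

import cv2
import numpy as np

src = np.float32(coordinatesToSrc(coordinates))  # four measured corners
dst = np.float32(screenDrawCorners)              # where they should land
H = cv2.getPerspectiveTransform(src, dst)        # 3x3 homography

def apply_transform(p):
    # homogeneous multiply plus perspective divide for a single (x, y) point
    v = H @ np.array([p[0], p[1], 1.0])
    return v[:2] / v[2]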
@@ -306,6 +325,7 @@ def captureFacesPoints(i):

        # We use this later for calibrating
        currentPoint = None
        currentVectors = None
        currentPoints = []

        if len(dets) > 0:
@@ -420,13 +440,14 @@ def captureFacesPoints(i):
            # x = translation_vector[0] + rotation_vector[0] * a
            # y = translation_vector[1] + rotation_vector[1] * a
            # logger.warn("First {} {},{}".format(a,x,y))
            a = - translation_vector[2]# / viewDirectionVector[2]
            a = translation_vector[2] / viewDirectionVector[2]
            x = translation_vector[0] + viewDirectionVector[0] * a
            y = translation_vector[1] + viewDirectionVector[1] * a
            # logger.warn("Second {} {},{}".format(a,x,y))
            point = np.array([x,y])

            currentPoint = point
            currentVectors = {'translation': translation_vector, 'rotation': viewDirectionVector}
            currentPoints.append(point)

        td3 = time.time()
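
The point computation above is a ray-plane intersection: the gaze ray starts at the head position (translation_vector) and runs along viewDirectionVector, and `a` is the ray parameter at which the z component reaches the screen plane; the sign flip between the two `a =` lines presumably reflects the camera-frame conventions coming out of solvePnP. A worked numeric sketch of that geometry:

import numpy as np

head = np.array([0.10, -0.05, 0.60])       # stand-in for translation_vector
direction = np.array([-0.15, 0.08, -1.0])  # stand-in for viewDirectionVector

# Solve head.z + direction.z * t = 0 for t, then walk the ray that far.
t = -head[2] / direction[2]
gaze_xy = head[:2] + direction[:2] * t
print(gaze_xy)  # (x, y) where the view ray crosses the z = 0 plane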
@@ -434,7 +455,7 @@ def captureFacesPoints(i):

        # TODO: only draw the nose line now, so we can change its color depending on whether it is on screen or not

        results = {'currentPoint': currentPoint, 'currentPoints': currentPoints}
        results = {'currentPoint': currentPoint, 'currentPoints': currentPoints, 'currentVectors': currentVectors}
        results['im'] = im if not args.hide_preview else None

        try:
@@ -445,11 +466,14 @@ def captureFacesPoints(i):

def captureVideo():
    c = cv2.VideoCapture(args.camera)
    # if not doing this we only have jittery 10fps
    c.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*"MJPG"))

    # set camera resolution
    # c.set(3, 1280)
    # c.set(4, 720)
    c.set(3, 960)
    c.set(4, 540)
    c.set(3, 1280)
    c.set(4, 720)
    # c.set(3, 960)
    # c.set(4, 540)
    logger.debug("Camera FPS: {}".format(c.get(5)))

    while True:
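
The magic numbers 3, 4 and 5 are OpenCV capture property ids; the named constants make the intent clearer (equivalent calls, same API):

c.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
c.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
logger.debug("Camera FPS: {}".format(c.get(cv2.CAP_PROP_FPS)))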
@@ -462,225 +486,280 @@ def captureVideo():
        logger.debug("Queue sizes: image: {}, points: {} ".format(photoQueue.qsize(), pointsQueue.qsize()))


processes = []
for i in range(args.processes - 2):
    p = multiprocessing.Process(target=captureFacesPoints, args=(i,))

if __name__ == '__main__':
    processes = []
    for i in range(args.processes - 2):
        p = multiprocessing.Process(target=captureFacesPoints, args=(i,))
        p.daemon = True
        p.start()
        processes.append(p)

    p = multiprocessing.Process(target=captureVideo, args=())
    p.daemon = True
    p.start()
    processes.append(p)

    p = multiprocessing.Process(target=captureVideo, args=())
    p.daemon = True
    p.start()
    processes.append(p)
newMetrics = np.zeros((metricsSize[1], metricsSize[0]))
lastRunTime = 0

while True:
    result = None
    try:

newMetrics = np.zeros((metricsSize[1], metricsSize[0]))
lastRunTime = 0

while True:
    result = None
    te1 = time.time()
    result = pointsQueue.get()
    te1b = time.time()
    im = result['im']
    currentPoint = result['currentPoint']
    currentPoints = result['currentPoints']
    except queue.Empty as e:
        logger.warn('Result queue empty')

    if result is not None:
        if not args.hide_preview:
            # draw little floorplan for 10 -> 50, sideplan 60 -> 100 (40x40 px)
            cv2.rectangle(im, (9, 9), (51, 51), (255,255,255), 1)
            cv2.rectangle(im, (59, 9), (101, 51), (255,255,255), 1)
            cv2.line(im, (10,10), (10,50), (200,200,200), 2)
            cv2.line(im, (60,10), (60,50), (200,200,200), 2)

            # screen is 16:10
            cv2.rectangle(im, (9, 59), (91, 111), (255,255,255), 1)

        if transform is None:
    try:
        result = pointsQueue.get()
        te1b = time.time()
        im = result['im']
        currentPoint = result['currentPoint']
        currentPoints = result['currentPoints']
        currentVectors = result['currentVectors']
    except queue.Empty as e:
        logger.warn('Result queue empty')

    tr1 = time.time()
    if result is not None:
        if not args.hide_preview:
            cv2.putText(im, "1", (10,70), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255) if coordinates['tl'] is not None else (0,0,255))
            cv2.putText(im, "2", (85,70), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255) if coordinates['tr'] is not None else (0,0,255))
            cv2.putText(im, "3", (10,110), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255) if coordinates['bl'] is not None else (0,0,255))
            cv2.putText(im, "4", (85,110), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255) if coordinates['br'] is not None else (0,0,255))
            tm1 = 0
            tm2 = 0
            tm3 = 0
            tm4 = 0
        else:
            for point in currentPoints:
                # check if within coordinates:
                # dot1 = np.dot(coordinates['tl'] - point, coordinates['tl'] - coordinates['br'])
                # dot2 = np.dot(coordinates['bl'] - point, coordinates['tl'] - coordinates['br'])
                # pointIn3 = [point[0], point[1], 0]
                # targetPoint = np.dot(pointIn3, transformationMatrix)
                # logger.info("Looking at", pointIn3, np.dot( transformationMatrix, pointIn3))
                targetPoint = transform(point)
                logger.info("Looking at {} {}".format(point, targetPoint) )
                # cv2.circle(im, (int(targetPoint[0]), int(targetPoint[1])), 2, (0,255,0), -1)
                # from 1920x1080 to 80x50
            # draw little floorplan for 10 -> 50, sideplan 60 -> 100 (40x40 px)
            cv2.rectangle(im, (9, 9), (51, 51), (255,255,255), 1)
            cv2.rectangle(im, (59, 9), (101, 51), (255,255,255), 1)
            cv2.line(im, (10,10), (10,50), (200,200,200), 2)
            cv2.line(im, (60,10), (60,50), (200,200,200), 2)

            # screen is 16:10
            cv2.rectangle(im, (9, 59), (91, 111), (255,255,255), 1)

        if transform is None:
            if not args.hide_preview:
                miniTargetPoint = (int(targetPoint[0] / metricsSize[0] * 80 + 10), int(targetPoint[1] / metricsSize[1] * 50 + 60))
                cv2.circle(im, miniTargetPoint, 2, (0,255,0), -1)
            targetInt = (int(targetPoint[0]), int(targetPoint[1]))
            # check if point fits on screen:
            # if so, measure it
            if targetInt[0]+spotSize[0] >= 0 and targetInt[1]+spotSize[1] >= 0 and targetInt[0]-spotSize[0] < metricsSize[0] and targetInt[1]-spotSize[0] < metricsSize[1]:
                dataframe = dataframe.append({'x':targetInt[0],'y':targetInt[1]}, ignore_index=True)
                logger.info("Put metric {},{} in metrics of {},{}".format(targetInt[1],targetInt[0], metricsSize[1], metricsSize[0]))
                # TODO: make it one numpy array action:
                for sx in range(spotSize[0]):
                    for sy in range(spotSize[1]):
                        mx = targetInt[0] + sx - (spotSize[0]-1)/2
                        my = targetInt[1] + sy - (spotSize[1]-1)/2

                        if mx >= 0 and my >= 0 and mx < metricsSize[0] and my < metricsSize[1]:
                            newMetrics[my,mx] += spot[sx,sy] #/ 20
                # print("MAX",np.max(newMetrics))

        # after we collected all new metrics, blur them for smoothness
        # and add to all metrics collected
        tm3 = time.time()
        # metrics = metrics + gaussian_filter(newMetrics, sigma = 13)

        tm4 = time.time()
        # logger.debug("Updated matrix with blur in %f", tm4 - tm3 + tm2 - tm1)

    # Display webcam image with overlays
    te2 = time.time()
    if result is not None and not args.hide_preview:
        cv2.imshow("Output", im)
    te3 = time.time()
    logger.debug("showed webcam image in %fs", te3-te2)
    logger.debug("Rendering took %fs", te3-te1)
    logger.debug("Waited took %fs", te1b-te1)

    # blur smooth the heatmap
    # logger.debug("Max blurred metrics: %f", np.max(metrics))

    # update the heatmap output
    tm21 = time.time()
    t = tm21

    diffT = min(1, t - lastRunTime)
    lastRunTime = t
    # animDuration = 1
    # factor = animDuration

    metrics = metrics + newMetrics*diffT
    newMetrics *= (1-diffT)
    print('MAXES', np.max(metrics), np.max(newMetrics), diffT, t - lastRunTime)

    # smooth impact of first hits by having at least 0.05
    normalisedMetrics = metrics / (max(255*7 ,np.max(metrics)))
    # convert to colormap, thanks to: https://stackoverflow.com/a/10967471
    normalisedMetricsColored = np.uint8(cm.nipy_spectral(normalisedMetrics)*255)
    normalisedMetricsColoredBGR = cv2.cvtColor(normalisedMetricsColored, cv2.COLOR_RGB2BGR)

    tm22 = time.time()
    logger.debug("Max normalised metrics: %f", np.max(normalisedMetrics))
    # logger.info(normalisedMetrics)
    tm23 = time.time()

    cv2.imshow("test",normalisedMetricsColoredBGR)
    # image = Image.fromarray(normalisedMetricsColored)
    # wpercent = (imageWindowSize[0] / float(image.size[0]))
    # hsize = int((float(image.size[1]) * float(wpercent)))
    # renderImage = image.resize((renderSize[0], renderSize[1]))
    # print(renderImage.size, "lala")

    # if args.queue_length:
    #     imageQueue.append(image)
    #     if len(imageQueue) > args.queue_length:
    #         logger.warn("Use image from queue :-)")
    #         image = imageQueue.pop(0)

    # tkpi = ImageTk.PhotoImage(renderImage)
    # imageCanvas.delete("IMG")
    # imagesprite = imageCanvas.create_image(renderSize[0]/2, renderSize[1]/2,image=tkpi, tags="IMG")
    # imageWindowRoot.update()
    tm24 = time.time()
    logger.debug("PIL image generated in %fs", tm24 - tm23)
    # logger.debug("Total matrix time is %fs", tm4 - tm3 + tm2 - tm1 + tm24 - tm21)

    if not args.hide_graph:
        te4 = time.time()
        axes.clear()
        if(len(dataframe) > 2):
            g = sns.kdeplot(dataframe['x'], dataframe['y'],ax=axes, n_levels=30, shade=True, cmap=cm.rainbow)
        canvas.draw()
        windowRoot.update()
        te5 = time.time()
        logger.debug("Drew graph & updated window in %fs", te5-te4)

    if args.output_dir:
        # save output to dir
        now = tm24 # time.time()
        if now - lastSaveTime > args.save_interval:
            filename = os.path.join(
                args.output_dir,
                "frame{}.png".format(
                    datetime.datetime.now().replace(microsecond=0).isoformat()
                )

            cv2.putText(im, "1", (10,70), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255) if coordinates['tl'] is not None else (0,0,255))
            cv2.putText(im, "2", (85,70), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255) if coordinates['tr'] is not None else (0,0,255))
            cv2.putText(im, "3", (10,110), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255) if coordinates['bl'] is not None else (0,0,255))
            cv2.putText(im, "4", (85,110), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255) if coordinates['br'] is not None else (0,0,255))
            tm1 = 0
            tm2 = 0
            tm3 = 0
            tm4 = 0
        else:
            for point in currentPoints:
                # check if within coordinates:
                # dot1 = np.dot(coordinates['tl'] - point, coordinates['tl'] - coordinates['br'])
                # dot2 = np.dot(coordinates['bl'] - point, coordinates['tl'] - coordinates['br'])
                # pointIn3 = [point[0], point[1], 0]
                # targetPoint = np.dot(pointIn3, transformationMatrix)
                # logger.info("Looking at", pointIn3, np.dot( transformationMatrix, pointIn3))
                targetPoint = transform(point)
                logger.info("Looking at {} {}".format(point, targetPoint) )
                # cv2.circle(im, (int(targetPoint[0]), int(targetPoint[1])), 2, (0,255,0), -1)
                # from 1920x1080 to 80x50
                if not args.hide_preview:
                    miniTargetPoint = (int(targetPoint[0] / metricsSize[0] * 80 + 10), int(targetPoint[1] / metricsSize[1] * 50 + 60))
                    cv2.circle(im, miniTargetPoint, 2, (0,255,0), -1)
                targetInt = (int(targetPoint[0]), int(targetPoint[1]))
                # check if point fits on screen:
                # if so, measure it
                if targetInt[0]+spotSize[0] >= 0 and targetInt[1]+spotSize[1] >= 0 and targetInt[0]-spotSize[0] < metricsSize[0] and targetInt[1]-spotSize[1] < metricsSize[1]:
                    if not args.hide_graph:
                        dataframe = dataframe.append({'x':targetInt[0],'y':targetInt[1]}, ignore_index=True)

                    logger.info("Put metric {},{} in metrics of {},{}".format(targetInt[1],targetInt[0], metricsSize[1], metricsSize[0]))
                    # newMetrics[targetInt[1]-1,targetInt[0]-1] += 1

                    # TODO: make it one numpy array action:
                    for sx in range(spotSize[0]):
                        for sy in range(spotSize[1]):
                            mx = targetInt[0] + sx - (spotSize[0]-1)/2
                            my = targetInt[1] + sy - (spotSize[1]-1)/2

                            if mx >= 0 and my >= 0 and mx < metricsSize[0] and my < metricsSize[1]:
                                newMetrics[my,mx] += spot[sx,sy] #/ 20
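
The TODO above can be satisfied with a single clipped array addition instead of the per-pixel double loop; a sketch using the same variables (floor division keeps the indices valid under Python 3, and spot is square and symmetric, so row/column order does not matter):

# paste `spot` centred on targetInt into newMetrics, clipping at the borders
cx = targetInt[0] - (spotSize[0] - 1) // 2
cy = targetInt[1] - (spotSize[1] - 1) // 2
x0, y0 = max(cx, 0), max(cy, 0)
x1 = min(cx + spotSize[0], metricsSize[0])
y1 = min(cy + spotSize[1], metricsSize[1])
if x0 < x1 and y0 < y1:
    newMetrics[y0:y1, x0:x1] += spot[y0 - cy:y1 - cy, x0 - cx:x1 - cx]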

        # after we collected all new metrics, blur them for smoothness
        # and add to all metrics collected
        tm3 = time.time()
        # metrics = metrics + gaussian_filter(newMetrics, sigma = 13)

        tm4 = time.time()
        # logger.debug("Updated matrix with blur in %f", tm4 - tm3 + tm2 - tm1)

    # Display webcam image with overlays
    te2 = time.time()
    if result is not None and not args.hide_preview:
        cv2.imshow("Output", im)
    te3 = time.time()
    logger.debug("Pre processing took: {}s".format(te2-tr1))
    logger.debug("showed webcam image in %fs", te3-te2)
    logger.debug("Rendering took %fs", te3-te1)
    logger.debug("Waited took %fs", te1b-te1)

    # blur smooth the heatmap
    # logger.debug("Max blurred metrics: %f", np.max(metrics))

    # update the heatmap output
    tm21 = time.time()
    t = tm21

    diffT = min(1, t - lastRunTime)
    lastRunTime = t
    # animDuration = 1
    # factor = animDuration

    metrics = metrics + newMetrics*diffT
    newMetrics *= (1-diffT)
    print('MAXES', np.max(metrics), np.max(newMetrics), diffT, t - lastRunTime)
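
In effect newMetrics acts as an accumulator that is bled into metrics over roughly a second: each frame transfers a diffT share (capped at 1) and scales the remainder by 1-diffT, so a hit fades in smoothly rather than popping. A compact restatement of the update, with dt standing in for the frame time:

dt = min(1, t - lastRunTime)   # seconds since the previous frame, capped at 1
metrics += newMetrics * dt     # move a time-proportional share across
newMetrics *= (1 - dt)         # keep the rest for the following frames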

    # smooth impact of first hits by having at least 0.05
    normalisedMetrics = metrics / (max(255*7 ,np.max(metrics)))
    # convert to colormap, thanks to: https://stackoverflow.com/a/10967471

    if args.only_metrics:
        # output only metrics instead of heatmap. Useful for debugging O:)
        nmax = np.max(newMetrics)
        renderMetrics = newMetrics/nmax if nmax > 0 else newMetrics
        normalisedMetricsColored = np.uint8(renderMetrics *255 )
        normalisedMetricsColoredBGR = cv2.cvtColor(normalisedMetricsColored, cv2.COLOR_GRAY2BGR)
        # draw grid lines
        for i in range(int(metricsSize[0]/100)):
            cv2.line(normalisedMetricsColoredBGR, (i*100, 0), (i*100, metricsSize[1]), (150,150,150), 1)
    else:
        normalisedMetricsColored = np.uint8(cm.nipy_spectral(normalisedMetrics)*255)
        normalisedMetricsColoredBGR = cv2.cvtColor(normalisedMetricsColored, cv2.COLOR_RGB2BGR)
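
One caveat worth noting: matplotlib colormaps return RGBA in [0,1], so the uint8 cast above yields a four-channel array; if cvtColor objects to the channel count, cv2.COLOR_RGBA2BGR is the conversion that drops the alpha channel:

rgba = np.uint8(cm.nipy_spectral(normalisedMetrics) * 255)  # H x W x 4
bgr = cv2.cvtColor(rgba, cv2.COLOR_RGBA2BGR)                # H x W x 3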

    if currentPoint is not None and args.verbose:
        cv2.putText(normalisedMetricsColoredBGR, "x: {}".format(currentPoint[0]), (10,70), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255))
        cv2.putText(normalisedMetricsColoredBGR, "y: {}".format(currentPoint[1]), (10,90), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255))

        cv2.putText(normalisedMetricsColoredBGR, "pos: x: {}, y: {}, z: {}".format(
            currentVectors['translation'][0],
            currentVectors['translation'][1],
            currentVectors['translation'][2]
        ), (10,110), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255))
        cv2.putText(
            normalisedMetricsColoredBGR,
            "rot: x: {}, y: {}, z: {}".format(
                currentVectors['rotation'][0],
                currentVectors['rotation'][1],
                currentVectors['rotation'][2],
            ), (10,130), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255))
        targetPoint = transform(currentPoint)
        logger.info("Are we really looking at {}".format(targetPoint))
        logger.info("Size: {}".format(normalisedMetricsColoredBGR.shape))
        cv2.circle(normalisedMetricsColoredBGR, targetPoint, 2, (0,255,0), -1)
        cv2.line(
            normalisedMetricsColoredBGR,
            (metricsSize[0]/2,metricsSize[1]/2),
            tuple(targetPoint),
            (255,0,0), 2
        )
    cv2.imwrite(filename, normalisedMetricsColoredBGR)
    # image.save(filename)

    with open(lastMetricsFilename, 'wb') as fp:
        pickle.dump( metrics, fp )

    logger.debug("Saved frame to {}".format(filename))
    lastSaveTime = now

    # (optionally) very slowly fade out previous metrics:
    metrics = metrics * .9997

    keyPress = cv2.waitKey(5)

    if keyPress==27:
        break
    elif keyPress == ord('d'):
        logger.setLevel(logging.DEBUG)
    elif keyPress > -1 and currentPoint is not None:
        recalculate = False
        if keyPress == ord('1'):
            coordinates['tl'] = currentPoint
            recalculate = True
            logger.warn('Calibrate 1')
        elif keyPress == ord('2'):
            coordinates['tr'] = currentPoint
            recalculate = True
            logger.warn('Calibrate 2')
        elif keyPress == ord('3'):
            coordinates['bl'] = currentPoint
            recalculate = True
            logger.warn('Calibrate 3')
        elif keyPress == ord('4'):
            coordinates['br'] = currentPoint
            recalculate = True
            logger.warn('Calibrate 4')
        elif keyPress == ord('t') and transform is not None:
            logger.info("Coordinates {}".format(coordinates) )
            logger.info("Drawing area {}".format(screenDrawCorners))
            logger.info("Test point {}".format(currentPoint ))
            logger.info("Transformed point {}".format(transform(currentPoint)))

        if recalculate is True and not any(x is None for x in coordinates.values()):
            logger.debug(coordinates.values())
            pickle.dump( coordinates, open( "coordinates.p", "wb" ) )
            logger.info("Saved coordinates")

            transform = create_perspective_transform(coordinatesToSrc(coordinates), screenDrawCorners)

    duration = time.time()-te1
    fps = 1/duration
    logger.info("Rendering loop %fs %ffps", duration, fps)

cv2.destroyAllWindows()

    # cv2.putText(normalisedMetricsColoredBGR, "z: {}".format(currentPoint[2]), (10,70), cv2.FONT_HERSHEY_PLAIN, .7, (255,255,255))

    tm22 = time.time()
    logger.debug("Max normalised metrics: %f", np.max(normalisedMetrics))
    # logger.info(normalisedMetrics)
    tm23 = time.time()

    # normalisedMetricsColoredBGR = cv2.resize(normalisedMetricsColoredBGR, tuple(renderSize))
    if backdrop is not None:
        dx = (screenSize[0] - renderSize[0]) / 2
        dy = (screenSize[1] - renderSize[1]) / 2
        print(dx, dy)
        backdrop[dy:dy+renderSize[1], dx:dx+renderSize[0]] = normalisedMetricsColoredBGR
        renderImage = backdrop
    else:
        renderImage = normalisedMetricsColoredBGR
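
A note on the centering offsets: under Python 3 the divisions above produce floats, which numpy rejects as slice indices; floor division keeps the paste valid (a one-line adjustment, assuming the padding may be split unevenly by a pixel):

dx = (screenSize[0] - renderSize[0]) // 2  # integer offsets for slicing
dy = (screenSize[1] - renderSize[1]) // 2
backdrop[dy:dy + renderSize[1], dx:dx + renderSize[0]] = normalisedMetricsColoredBGR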
    cv2.imshow("test", renderImage)
    # image = Image.fromarray(normalisedMetricsColored)
    # wpercent = (imageWindowSize[0] / float(image.size[0]))
    # hsize = int((float(image.size[1]) * float(wpercent)))
    # renderImage = image.resize((renderSize[0], renderSize[1]))
    # print(renderImage.size, "lala")

    # if args.queue_length:
    #     imageQueue.append(image)
    #     if len(imageQueue) > args.queue_length:
    #         logger.warn("Use image from queue :-)")
    #         image = imageQueue.pop(0)

    # tkpi = ImageTk.PhotoImage(renderImage)
    # imageCanvas.delete("IMG")
    # imagesprite = imageCanvas.create_image(renderSize[0]/2, renderSize[1]/2,image=tkpi, tags="IMG")
    # imageWindowRoot.update()
    tm24 = time.time()
    logger.debug("Render generated in %fs", tm24 - tm23)
    # logger.debug("Total matrix time is %fs", tm4 - tm3 + tm2 - tm1 + tm24 - tm21)

    if not args.hide_graph:
        te4 = time.time()
        axes.clear()
        if(len(dataframe) > 2):
            g = sns.kdeplot(dataframe['x'], dataframe['y'],ax=axes, n_levels=30, shade=True, cmap=cm.rainbow)
        canvas.draw()
        windowRoot.update()
        te5 = time.time()
        logger.debug("Drew graph & updated window in %fs", te5-te4)

    if args.output_dir:
        # save output to dir
        now = tm24 # time.time()
        if now - lastSaveTime > args.save_interval:
            filename = os.path.join(
                args.output_dir,
                "frame{}.png".format(
                    datetime.datetime.now().replace(microsecond=0).isoformat()
                )
            )
            cv2.imwrite(filename, normalisedMetricsColoredBGR)
            # image.save(filename)

            with open(lastMetricsFilename, 'wb') as fp:
                pickle.dump( metrics, fp )

            logger.debug("Saved frame to {}".format(filename))
            lastSaveTime = now

    # (optionally) very slowly fade out previous metrics:
    metrics = metrics * .9997

    keyPress = cv2.waitKey(5)

    if keyPress==27:
        break
    elif keyPress == ord('d'):
        logger.setLevel(logging.DEBUG)
    elif keyPress > -1 and currentPoint is not None:
        recalculate = False
        if keyPress == ord('1'):
            coordinates['tl'] = currentPoint
            recalculate = True
            logger.warn('Calibrate 1')
        elif keyPress == ord('2'):
            coordinates['tr'] = currentPoint
            recalculate = True
            logger.warn('Calibrate 2')
        elif keyPress == ord('3'):
            coordinates['bl'] = currentPoint
            recalculate = True
            logger.warn('Calibrate 3')
        elif keyPress == ord('4'):
            coordinates['br'] = currentPoint
            recalculate = True
            logger.warn('Calibrate 4')
        elif keyPress == ord('t') and transform is not None:
            logger.info("Coordinates {}".format(coordinates) )
            logger.info("Drawing area {}".format(screenDrawCorners))
            logger.info("Test point {}".format(currentPoint ))
            logger.info("Transformed point {}".format(transform(currentPoint)))

        if recalculate is True and not any(x is None for x in coordinates.values()):
            logger.debug(coordinates.values())
            pickle.dump( coordinates, open( "coordinates.p", "wb" ) )
            logger.info("Saved coordinates")

            transform = create_perspective_transform(coordinatesToSrc(coordinates), screenDrawCorners, True)

    duration = time.time()-te1
    fps = 1/duration
    logger.info("Rendering loop %fs %ffps", duration, fps)

cv2.destroyAllWindows()