@@ -27,6 +27,9 @@ import coloredlogs
 import argparse
 import multiprocessing
+
+cur_dir = os.path.dirname(__file__)
+
 argParser = argparse.ArgumentParser(description='Draw a heatmap')
 argParser.add_argument(
     '--camera',
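For orientation, this is roughly how the flags that the rest of the diff relies on (`--camera`, `args.hide_preview`, `args.output_dir`) could be declared; the types, defaults, and help strings below are assumptions for illustration, not taken from the repository:

```python
import argparse

argParser = argparse.ArgumentParser(description='Draw a heatmap')
# '--camera' appears in the hunk above; an int device index is an assumption
argParser.add_argument('--camera', type=int, default=0,
                       help='index of the capture device')
# args.hide_preview and args.output_dir are referenced in later hunks
argParser.add_argument('--hide-preview', action='store_true',
                       help='do not draw the preview window')
argParser.add_argument('--output-dir',
                       help='directory to store metrics output')
args = argParser.parse_args()
```

With `store_true`, `args.hide_preview` defaults to `False`, so the preview code paths in the later hunks stay enabled unless the flag is passed.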
@@ -85,8 +88,13 @@ logger = logging.getLogger(__name__)
 # im = cv2.imread("headPose.jpg");

+spotSize = (100,100)
+spot = Image.open(os.path.join(cur_dir,"spot.png")).convert('L')
+spot = spot.resize(spotSize)
+spot = np.array(spot)

-predictor_path = "shape_predictor_68_face_landmarks.dat"
+predictor_path = os.path.join(cur_dir,"shape_predictor_68_face_landmarks.dat")

 if args.output_dir:
     lastMetricsFilename = os.path.join(args.output_dir, 'last_metrics.p')
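The grayscale `spot` array is the stamp that later hunks add into the metrics grid around each gaze target. A minimal standalone sketch of the same loading step, assuming a soft radial `spot.png` next to the script; the fallback that synthesises a gradient when the file is missing is purely illustrative:

```python
import os
import numpy as np
from PIL import Image

cur_dir = os.path.dirname(__file__)
spotSize = (100, 100)
spot_path = os.path.join(cur_dir, "spot.png")

if os.path.exists(spot_path):
    # Single channel ('L'): every pixel becomes a 0-255 weight
    spot = np.array(Image.open(spot_path).convert('L').resize(spotSize))
else:
    # Fallback (not part of the repo): a soft radial gradient of the same shape
    ys, xs = np.mgrid[0:spotSize[1], 0:spotSize[0]]
    cx, cy = (spotSize[0] - 1) / 2, (spotSize[1] - 1) / 2
    dist = np.sqrt((xs - cx) ** 2 + (ys - cy) ** 2)
    spot = np.clip(255 * (1 - dist / dist.max()), 0, 255).astype(np.uint8)
```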
@@ -425,7 +433,8 @@ def captureFacesPoints(i):
     # TODO only draw nose line now, so we can change color depending whether on screen or not

-    results = {'currentPoint': currentPoint, 'currentPoints': currentPoints, 'im': im}
+    results = {'currentPoint': currentPoint, 'currentPoints': currentPoints}
+    results['im'] = im if not args.hide_preview else None

     try:
         pointsQueue.put_nowait(results)
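Keeping `im` out of the payload when the preview is hidden avoids pushing full frames through the queue for nothing. A sketch of the surrounding producer pattern, assuming a small bounded `multiprocessing.Queue` shared with the drawing loop; the helper name and queue size are illustrative:

```python
import multiprocessing
import queue

# Small buffer so the consumer always works on near-live detections
pointsQueue = multiprocessing.Queue(maxsize=2)

def publish(currentPoint, currentPoints, im, hide_preview):
    results = {'currentPoint': currentPoint, 'currentPoints': currentPoints}
    # Only ship the frame when a preview window will actually use it
    results['im'] = im if not hide_preview else None
    try:
        pointsQueue.put_nowait(results)
    except queue.Full:
        # Drop this result rather than stall the capture process
        pass
```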
@@ -505,15 +514,24 @@ while True:
     logger.info("Looking at {} {}".format(point, targetPoint) )
     # cv2.circle(im, (int(targetPoint[0]), int(targetPoint[1])), 2, (0,255,0), -1)

     # from 1920x1080 to 80x50
-    miniTargetPoint = (int(targetPoint[0] / 1920 * 80 + 10), int(targetPoint[1] / 1080 * 50 + 60))
-    cv2.circle(im, miniTargetPoint, 2, (0,255,0), -1)
+    if not args.hide_preview:
+        miniTargetPoint = (int(targetPoint[0] / 1920 * 80 + 10), int(targetPoint[1] / 1080 * 50 + 60))
+        cv2.circle(im, miniTargetPoint, 2, (0,255,0), -1)

     targetInt = (int(targetPoint[0]), int(targetPoint[1]))

     # check if point fits on screen:
     # if so, measure it
-    if targetInt[0] >= 0 and targetInt[1] >= 0 and targetInt[0] < metricsSize[0] and targetInt[1] < metricsSize[1]:
+    if targetInt[0]+spotSize[0] >= 0 and targetInt[1]+spotSize[1] >= 0 and targetInt[0]-spotSize[0] < metricsSize[0] and targetInt[1]-spotSize[0] < metricsSize[1]:
         dataframe = dataframe.append({'x':targetInt[0],'y':targetInt[1]}, ignore_index=True)
-        logger.debug("Put metric {},{} in metrix of {},{}".format(targetInt[1],targetInt[0], metricsSize[1], metricsSize[0]))
-        newMetrics[targetInt[1],targetInt[0]] += 1
+        logger.info("Put metric {},{} in metrix of {},{}".format(targetInt[1],targetInt[0], metricsSize[1], metricsSize[0]))
+        for sx in range(spotSize[0]):
+            for sy in range(spotSize[1]):
+                mx = targetInt[0] + sx - (spotSize[0]-1)/2
+                my = targetInt[1] + sy - (spotSize[1]-1)/2
+
+                if mx >= 0 and my >= 0 and mx < metricsSize[0] and my < metricsSize[1]:
+                    newMetrics[my,mx] += spot[sx,sy] #/ 20
+        print("MAX",np.max(newMetrics))

     # TODO: put in an image of a blurred spot & remove blur action
     # after we collected all new metrics, blur them foor smoothness
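The nested `sx`/`sy` loop stamps the spot's weights into `newMetrics` around the gaze target, skipping anything that falls off the grid. The same stamping can be sketched with NumPy slicing; the function name, the integer offsets, and the explicit clipping below are illustrative, assuming `metrics` is indexed `[y, x]` and `metrics_size` is `(width, height)` as in the diff:

```python
import numpy as np

def stamp_spot(metrics, spot, target, metrics_size):
    """Add the spot weights to metrics, centred on target=(x, y), clipped to the grid."""
    h, w = spot.shape                      # spot is indexed [row, col] = [y, x]
    x0 = int(target[0]) - (w - 1) // 2     # integer offsets, usable as array indices
    y0 = int(target[1]) - (h - 1) // 2
    # Clip the stamp rectangle to the metrics bounds
    mx0, my0 = max(x0, 0), max(y0, 0)
    mx1, my1 = min(x0 + w, metrics_size[0]), min(y0 + h, metrics_size[1])
    if mx0 >= mx1 or my0 >= my1:
        return                             # the spot falls entirely off screen
    metrics[my0:my1, mx0:mx1] += spot[my0 - y0:my1 - y0, mx0 - x0:mx1 - x0]
```

Slicing keeps the per-frame cost of a stamp roughly constant instead of 100×100 Python-level iterations.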
@@ -538,7 +556,7 @@ while True:
     # update the heatmap output
     tm21 = time.time()
     # smooth impact of first hits by having at least 0.05
-    normalisedMetrics = metrics / (max(.02, np.max(metrics)))
+    normalisedMetrics = metrics / (max(255*4 ,np.max(metrics)))
     # convert to colormap, thanks to: https://stackoverflow.com/a/10967471
     normalisedMetricsColored = np.uint8(cm.nipy_spectral(normalisedMetrics)*255)
     normalisedMetricsColoredBGR = cv2.cvtColor(normalisedMetricsColored, cv2.COLOR_RGB2BGR)
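With spot values of up to 255 now flowing into `metrics`, the normalisation floor moves from `.02` to `255*4` so the first few stamps don't immediately saturate the colour scale. A self-contained sketch of this normalise → colormap → BGR step; the function name and floor parameter are illustrative, and it uses `COLOR_RGBA2BGR` because matplotlib colormaps return four-channel RGBA:

```python
import numpy as np
import cv2
from matplotlib import cm

def metrics_to_heatmap(metrics, floor=255 * 4):
    # Normalise to [0, 1]; the floor damps the impact of the first few hits
    normalised = metrics / max(floor, np.max(metrics))
    # Colormap lookup returns RGBA floats in [0, 1]; scale to 8-bit
    colored = np.uint8(cm.nipy_spectral(normalised) * 255)
    # Reorder channels (and drop alpha) for OpenCV display or writing
    return cv2.cvtColor(colored, cv2.COLOR_RGBA2BGR)
```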