To install the kernel, run:
poetry run ipython kernel install --user --name=mpii
In [296]:
# imports
import os
from pathlib import Path
from scipy.io import loadmat
import scipy.spatial  # needed for scipy.spatial.KDTree below
import tqdm
import pickle
import logging
import time
import gc
%matplotlib notebook
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from threading import Lock
In [46]:
# logging
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger('pose_ordering')
logger.setLevel(logging.INFO)
In [33]:
# config
mpii_idx_to_jnt = {0: 'rankl', 1: 'rknee', 2: 'rhip', 3: 'lhip', 4: 'lknee', 5: 'lankl',
                   6: 'pelvis', 7: 'thorax', 8: 'upper_neck', 9: 'head', 10: 'rwri', 11: 'relb',
                   12: 'rsho', 13: 'lsho', 14: 'lelb', 15: 'lwri'}
angles = [
    ['rwri', 'relb', 'rsho'],
    ['relb', 'rsho', 'thorax'],
    ['rsho', 'thorax', 'pelvis'],
    ['thorax', 'pelvis', 'rhip'],
    ['pelvis', 'rhip', 'rknee'],
    ['rhip', 'rknee', 'rankl'],
    ['rsho', 'thorax', 'upper_neck'],
    ['lwri', 'lelb', 'lsho'],
    ['lelb', 'lsho', 'thorax'],
    ['lsho', 'thorax', 'pelvis'],
    ['thorax', 'pelvis', 'lhip'],
    ['pelvis', 'lhip', 'lknee'],
    ['lhip', 'lknee', 'lankl'],
    ['lsho', 'thorax', 'upper_neck'],
    ['thorax', 'upper_neck', 'head'],
]
bones = [
    ['rankl', 'rknee', 'orange'],
    ['rknee', 'rhip', 'orange'],
    ['rhip', 'pelvis', 'orange'],
    ['lankl', 'lknee', 'yellow'],
    ['lknee', 'lhip', 'yellow'],
    ['lhip', 'pelvis', 'yellow'],
    ['rwri', 'relb', 'red'],
    ['relb', 'rsho', 'red'],
    ['rsho', 'thorax', 'red'],
    ['lwri', 'lelb', 'blue'],
    ['lelb', 'lsho', 'blue'],
    ['lsho', 'thorax', 'blue'],
    ['thorax', 'upper_neck', 'pink'],
    ['upper_neck', 'head', 'pink'],
    ['thorax', 'pelvis', 'green'],
]
filename = 'mpii_human_pose_v1_u12_2/mpii_human_pose_v1_u12_1.mat'
# matplotlib plot size: double the default DPI for larger inline figures
default_dpi = matplotlib.rcParamsDefault['figure.dpi']
matplotlib.rcParams['figure.dpi'] = default_dpi * 2
In [18]:
%%time
if os.path.exists(filename + '.p'):
    with open(filename + '.p', 'rb') as fp:
        gc.disable()  # speeds up pickle.load by ~30%; see the loader sketch after this cell
        logger.debug('Loading pickled version')
        mat = pickle.load(fp)
        gc.enable()
else:
    logger.debug(f'Loading {filename}')
    mat = loadmat(filename)
    with open(filename + '.p', 'wb') as fp:
        pickle.dump(mat, fp)
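As a side note, the gc.disable() trick above could be wrapped in a small reusable loader. A minimal sketch, assuming the cached file from the previous cell exists; load_pickle is a hypothetical helper, not used elsewhere in this notebook:
import gc
import pickle
import time

def load_pickle(path, disable_gc=True):
    # optionally pause the garbage collector, which can speed up loads
    # that allocate many small objects
    with open(path, 'rb') as fp:
        if disable_gc:
            gc.disable()
        try:
            return pickle.load(fp)
        finally:
            if disable_gc:
                gc.enable()  # always re-enable, even if loading fails

t0 = time.perf_counter()
_ = load_pickle(filename + '.p')
print(f"loaded in {time.perf_counter() - t0:.2f}s")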
In [19]:
mpii = mat['RELEASE'][0,0]
num_images = mpii['annolist'][0].shape[0]
num_images
Out[19]:
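For orientation: loadmat maps MATLAB structs to numpy structured arrays, so the available fields can be inspected through the dtype. A quick sketch; of these fields, only 'annolist', 'img_train', 'single_person' and 'act' are used below:
# list the top-level fields of the RELEASE struct
print(mpii.dtype.names)
# annolist holds one struct per image
print(mpii['annolist'].shape)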
In [20]:
plt.plot([1, 2], [-1, 4])
# quick test plot to check that the interactive backend works
plt.show()
In [233]:
has_values = 0
visualise = False
vectors = []
vector_points = []
for idx in tqdm.tqdm(range(num_images)):
# for idx in range(num_images):
    # # is_training = mpii['img_train'][0,idx] # whether in train or test set
    # person_ids = mpii['single_person'][idx][0].flatten()
    # if not len(person_ids):
    #     # skip, because not enough persons
    #     continue
    annotations = mpii['annolist'][0, idx]
    anno_file = str(annotations[0]['name'][0][0][0])
    filename = '/home/ruben/Documents/Projecten/2020/Security Vision/tryouts/MPII Human Pose Dataset/images/' + anno_file
    logger.debug(filename)
    if visualise:
        image = Image.open(filename)
    if not len(annotations['annorect']):
        continue
    for annotation in annotations['annorect'][0]:
        # TODO: we might need to mirror the objects following a particular rule (see also Impett & Moretti)
        try:
            annotation_points = annotation['annopoints'][0, 0]['point'][0]
        except Exception:
            # no points tagged for this person
            continue
        # logger.debug(points.shape[0])
        points = {}
        for point in annotation_points:
            x = float(point['x'].flatten()[0])
            y = float(point['y'].flatten()[0])
            id_ = point['id'][0][0]
            vis = point['is_visible'].flatten() if 'is_visible' in point else []
            joint = mpii_idx_to_jnt[id_]
            vis = int(vis[0]) if len(vis) else 0
            points[joint] = np.array([x, y, vis])
        if not all([joint in points for joint in mpii_idx_to_jnt.values()]):
            logger.debug(f"Not enough points: {points=}")
            break  # NOTE: also skips any remaining persons in this image
        # if 'rhip' not in points or 'lhip' not in points or 'thorax' not in points:
        #     logger.info(f"Not enough points: {points=}")
        #     continue
        visible_joints = [joint for joint in mpii_idx_to_jnt.values() if joint in points]
        if visualise:
            plt.imshow(image)
            plt.plot(np.array([points[joint][0] for joint in visible_joints]),
                     np.array([points[joint][1] for joint in visible_joints]), 'o')
        for bone in bones:
            if not all([bone[0] in points, bone[1] in points]):
                continue
            if visualise:
                plt.plot([points[bone[0]][0], points[bone[1]][0]],
                         [points[bone[0]][1], points[bone[1]][1]], color=bone[2])
        annotation_vector = []
        for joints in angles:
            if not all([p in points for p in joints]):
                # not all points needed for this angle are available
                annotation_vector.append(None)  # CHOICE: store null
                continue
            v1 = points[joints[0]] - points[joints[1]]
            v2 = points[joints[2]] - points[joints[1]]
            angle = np.arctan2(v2[1], v2[0]) - np.arctan2(v1[1], v1[0])
            annotation_vector.append(angle * angle)  # CHOICE: squared angle? (see the wrap-around note after this cell)
            if visualise:
                plt.text(int(points[joints[1]][0]), int(points[joints[1]][1]), f"{angle:.4}")
        vector_points.append(points)
        vectors.append([annotation_vector, idx, len(vector_points) - 1])
        has_values += 1
        # print(annotations)
        # break
    if visualise:
        plt.show()  # show image
        if has_values > 2:
            break  # early exit while visualising
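One caveat with the angle computation above: the raw arctan2 difference can fall outside [-pi, pi], so squaring it treats otherwise similar poses on either side of the wrap-around differently. An alternative sketch that normalizes first; joint_angle is a hypothetical helper, not the notebook's actual method:
import numpy as np

def joint_angle(points, a, b, c):
    # signed angle at joint b between the bones b->a and b->c
    v1 = points[a][:2] - points[b][:2]  # use x,y only; index 2 is visibility
    v2 = points[c][:2] - points[b][:2]
    angle = np.arctan2(v2[1], v2[0]) - np.arctan2(v1[1], v1[0])
    return (angle + np.pi) % (2 * np.pi) - np.pi  # wrap into [-pi, pi)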
In [144]:
print(len(vectors), vectors[1], vectors[-1])
In [118]:
from sklearn.preprocessing import StandardScaler
In [145]:
values = [v[0] for v in vectors]
In [265]:
x = StandardScaler().fit_transform(values)  # standardize the features (zero mean, unit variance)
In [266]:
x.mean(), np.std(x)
Out[266]:
In [267]:
from sklearn.decomposition import PCA
In [268]:
# With 0 < n_components < 1 and svd_solver='full', PCA selects the number of
# components needed to explain at least that fraction of the variance (a short
# sketch follows the variance plot below); see
# https://medium.com/@ansjin/dimensionality-reduction-using-pca-on-multivariate-timeseries-data-b5cc07238dc4
pca = PCA(n_components=None)
principalComponents = pca.fit_transform(x)
In [269]:
pca.explained_variance_ratio_
Out[269]:
In [270]:
# pca.components_
# pca.get_precision()
plt.plot(np.cumsum(pca.explained_variance_ratio_))
plt.xlabel('number of components')
plt.ylabel('cumulative explained variance');
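Following the note above, a short sketch of letting PCA pick the component count for a variance threshold; the 0.95 cut-off is an arbitrary example:
from sklearn.decomposition import PCA

# select the smallest number of components explaining >= 95% of the variance;
# this requires 0 < n_components < 1 and svd_solver='full'
pca95 = PCA(n_components=0.95, svd_solver='full')
reduced = pca95.fit_transform(x)
print(pca95.n_components_, reduced.shape)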
In [271]:
def get_catname(act) -> str:
    return str(act['cat_name'][0][0]) if len(act['cat_name'][0]) else 'unknown'
In [294]:
# cats = [mpii['act'][v[1]]['cat_name'][0] for v in vectors]
cats = np.unique([get_catname(act) for act in mpii['act']]).tolist()
cats
Out[294]:
In [326]:
# t = [mpii['act'][v[1]]['act_id'][0][0] for v in vectors]
t = [cats.index(get_catname(mpii['act'][v[1]])) for v in vectors]
fig = plt.figure()
ax = fig.add_subplot(1, 3, (1, 2))
ax_figures = fig.add_subplot(1, 3, 3)
kdtree = scipy.spatial.KDTree(principalComponents[:, [0, 1]])  # CHOICE: only consider first two dimensions...

def show_closest(event):
    closest = kdtree.query([event.xdata, event.ydata], workers=-1)
    ax_figures.clear()
    distance, vector_idx = closest
    annotation_idx = vectors[vector_idx][1]
    points_idx = vectors[vector_idx][2]
    anno_points = np.array(list(vector_points[points_idx].values()))
    ax_figures.plot(anno_points[:, 0], anno_points[:, 1] * -1, 'o')  # flip y: image coordinates run downwards
    for bone in bones:
        if not all([bone[0] in vector_points[points_idx], bone[1] in vector_points[points_idx]]):
            continue
        ax_figures.plot([vector_points[points_idx][bone[0]][0], vector_points[points_idx][bone[1]][0]],
                        [-vector_points[points_idx][bone[0]][1], -vector_points[points_idx][bone[1]][1]],
                        color=bone[2])
    # ax_figures.plot(np.random.rand(10))

def onclick(event):
    show_closest(event)

processing = Lock()

def onmove(event):
    if not event.xdata or not event.ydata:
        return
    # non-blocking acquire: drop move events that arrive while a redraw is
    # still in progress, instead of queueing them up
    if processing.acquire(blocking=False):
        try:
            show_closest(event)
        finally:
            processing.release()

cid_move = fig.canvas.mpl_connect('motion_notify_event', onmove)
cid_click = fig.canvas.mpl_connect('button_press_event', onclick)
ax.scatter(principalComponents[:, 0], principalComponents[:, 1], c=t, cmap="viridis", alpha=.3, marker='x', linewidth=.5)
Out[326]:
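The same KDTree can also return several neighbours per query, which could drive a small-multiples view instead of a single pose. A minimal sketch; k=5 and the query point are arbitrary choices for illustration:
# query the 5 poses closest to a point in PCA space
distances, indices = kdtree.query([0.0, 0.0], k=5, workers=-1)
for d, i in zip(distances, indices):
    print(f"vector {i}: distance {d:.3f}, image idx {vectors[i][1]}")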
In [308]:
closest[1], closest_old[1]
Out[308]:
In [321]:
import random
Out[321]:
In [ ]:
nr = random.randint(0, len(vectors) - 1)
imgidx = vectors[nr][1]
principalComponents[nr]
# vectors[6]
annotations = mpii['annolist'][0, imgidx]
anno_file = str(annotations[0]['name'][0][0][0])
filename = '/home/ruben/Documents/Projecten/2020/Security Vision/tryouts/MPII Human Pose Dataset/images/' + anno_file
print(mpii['act'][imgidx]['cat_name'],
      mpii['act'][imgidx]['act_name'], mpii['act'][imgidx]['act_id'],
      int(mpii['act'][imgidx]['act_id'][0][0]))
Image.open(filename)