# From Python
import cv2
import argparse
import subprocess
import numpy as np
import pyopenpose as op

# Flags
parser = argparse.ArgumentParser()
parser.add_argument("--image_path", default="/data/human.jpg",
                    help="Process an image. Read all standard formats (jpg, png, bmp, etc.).")
parser.add_argument("--steps", default=100, type=int,
                    help="Number of frames to render while progressively scaling the image")
args, unknown_args = parser.parse_known_args()

# Custom Params (refer to include/openpose/flags.hpp for more parameters)
# here: https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/fc7813cfbb93552905a7190e6805fd01a9c101a2/include/openpose/flags.hpp
params = dict()
params["model_folder"] = "/openpose/models/"
# params["face"] = True
# params["hand"] = True
# params["heatmaps_add_parts"] = True
# params["heatmaps_add_bkg"] = True
# params["heatmaps_add_PAFs"] = True
# params["heatmaps_scale"] = 3
# params["upsampling_ratio"] = 1
# params["body"] = 1

# Forward any remaining --flags to OpenPose: a flag followed by another flag
# (or by nothing) becomes "1"; otherwise it takes the next token as its value.
for i in range(0, len(unknown_args)):
    curr_item = unknown_args[i]
    if i != len(unknown_args) - 1:
        next_item = unknown_args[i + 1]
    else:
        next_item = "1"
    if "--" in curr_item and "--" in next_item:
        key = curr_item.replace('-', '')
        if key not in params:
            params[key] = "1"
    elif "--" in curr_item and "--" not in next_item:
        key = curr_item.replace('-', '')
        if key not in params:
            params[key] = next_item

# Construct it from system arguments
# op.init_argv(unknown_args)
# oppython = op.OpenposePython()

original_image = cv2.imread(args.image_path)
if original_image is None:
    raise FileNotFoundError(f"Could not read image: {args.image_path}")

# Starting OpenPose
opWrapper = op.WrapperPython()
opWrapper.configure(params)
opWrapper.start()

# Earlier variant: progressively mask off the lower part of the image instead of scaling it.
# for i in range(args.steps):
#     factor = i / args.steps
#     mask = np.zeros(original_image.shape[:2], dtype="uint8")
#     cv2.rectangle(mask, (0, 0), (original_image.shape[1], int(original_image.shape[0] - original_image.shape[0] * factor)), 255, -1)
#     # cv2.imshow("Rectangular Mask", mask)
#     frame = cv2.bitwise_and(original_image, original_image, mask=mask)
#
#     # Process Image
#     datum = op.Datum()
#     datum.cvInputData = frame
#     opWrapper.emplaceAndPop(op.VectorDatum([datum]))
#
#     # Display Image
#     # print("Body keypoints: \n" + str(datum.poseKeypoints))
#     # print("Face keypoints: \n" + str(datum.faceKeypoints))
#     # print("Left hand keypoints: \n" + str(datum.handKeypoints[0]))
#     # print("Right hand keypoints: \n" + str(datum.handKeypoints[1]))
#     print(i, datum.poseKeypoints is not None)
#     cv2.imwrite(f"/data/out/result_body_obscure{i:03d}.jpg", datum.cvOutputData)
#
# subprocess.call([
#     'ffmpeg', '-i', '/data/out/result_body_obscure%03d.jpg', '-y', '/data/out/result_body_obscure.mp4'
# ])

for i in range(args.steps):
    # Shrink the image horizontally and centre it on a black canvas of the original size.
    scale = 1 - (i / args.steps)
    frame = np.zeros(original_image.shape, dtype="uint8")
    width = max(1, int(original_image.shape[1] * scale))  # guard against a zero-width resize
    height = original_image.shape[0]
    partial_frame = cv2.resize(original_image, (width, height))
    # frame[y_start:y_end, x_start:x_end] = partial_frame
    offset = int((frame.shape[1] - width) / 2)
    # print(offset)
    frame[0:height, offset:(offset + width)] = partial_frame

    # Process Image
    datum = op.Datum()
    # print(frame.shape)
    datum.cvInputData = frame
    opWrapper.emplaceAndPop(op.VectorDatum([datum]))

    # Display Image
    # print("Body keypoints: \n" + str(datum.poseKeypoints))
    # print("Face keypoints: \n" + str(datum.faceKeypoints))
    # print("Left hand keypoints: \n" + str(datum.handKeypoints[0]))
    # print("Right hand keypoints: \n" + str(datum.handKeypoints[1]))
    print(i, datum.poseKeypoints is not None)
    cv2.imwrite(f"/data/out/result_body_scale{i:03d}.jpg", datum.cvOutputData)

# Stitch the rendered frames into a video (expects ffmpeg on the PATH).
# The frames are written zero-padded (result_body_scale000.jpg, 001, ...),
# so the ffmpeg input pattern must be %03d, not %3d.
subprocess.call([
    'ffmpeg', '-i', '/data/out/result_body_scale%03d.jpg', '-y', '/data/out/result_body_scale.mp4'
])

print("done")

# cv2.imshow("OpenPose 1.7.0 - Tutorial Python API", datum.cvOutputData)
# cv2.waitKey(0)
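
# A minimal usage sketch (the filename body_scale.py and the Docker setup are
# assumptions, not part of this script): run it wherever pyopenpose is importable,
# e.g. inside an OpenPose container with the host's data directory mounted at
# /data. Extra --flags such as --net_resolution are forwarded to OpenPose through
# the params dict above.
#
#   python3 body_scale.py --image_path /data/human.jpg --steps 100 --net_resolution -1x368
#
# Note that /data/out/ must already exist (cv2.imwrite does not create
# directories) and ffmpeg must be on the PATH for the final video step.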