12 KiB
12 KiB
In [1]:
#hide
import pyopenpose as op
import ipywidgets
from ipywebrtc import CameraStream
import cv2
In [2]:
!pip3 install tqdm
In [3]:
from tqdm.notebook import tqdm
Jupyter notebook for OpenPose experiments¶
Reads an image or video from the /data directory, runs OpenPose on it, and renders the output here.
In [4]:
# Show GPU / driver status (OpenPose needs a working CUDA device to run at speed).
!nvidia-smi
In [5]:
# CameraStream.facing_user(audio=False)
# requires SSL website... :-(
In [6]:
# OpenPose configuration flags; commented entries are optional features
# that can be re-enabled for experiments.
params = {
    "model_folder": "/openpose/models/",  # where the pretrained models live
    "hand": True,                         # also detect hand keypoints
    # "face": True,
    # "heatmaps_add_parts": True,
    # "heatmaps_add_bkg": True,
    # "heatmaps_add_PAFs": True,
    # "heatmaps_scale": 3,
    # "upsampling_ratio": 1,
    # "body": 1,
}
In [7]:
# Starting OpenPose
# Configure-then-start pattern: `params` (defined above) must be applied
# before start(). NOTE(review): start() presumably loads the models from
# model_folder and may take several seconds — confirm against OpenPose docs.
opWrapper = op.WrapperPython()
opWrapper.configure(params)
opWrapper.start()
In [8]:
# upload = ipywidgets.FileUpload()
# display(upload)
In [ ]:
In [9]:
# # Process Image
# datum = op.Datum()
# imageToProcess = cv2.imread(args[0].image_path)
# datum.cvInputData = imageToProcess
# opWrapper.emplaceAndPop(op.VectorDatum([datum]))
# # Display Image
# print("Body keypoints: \n" + str(datum.poseKeypoints))
# print("Face keypoints: \n" + str(datum.faceKeypoints))
# print("Left hand keypoints: \n" + str(datum.handKeypoints[0]))
# print("Right hand keypoints: \n" + str(datum.handKeypoints[1]))
# cv2.imwrite("/data/result_body.jpg",datum.cvOutputData)
# print(dir(datum))
# # cv2.imshow("OpenPose 1.7.0 - Tutorial Python API", datum.cvOutputData)
# cv2.waitKey(0)
In [ ]:
In [ ]:
In [39]:
# Create a video capture object, in this case we are reading the video from a file
vid_capture = cv2.VideoCapture('/data/0001-0500.mp4')
if not vid_capture.isOpened():
    print("Error opening the video file")
else:
    # Use the named property enumerations instead of the magic numbers 5 / 7.
    fps = vid_capture.get(cv2.CAP_PROP_FPS)
    print('Frames per second : ', fps, 'FPS')
    # Frame count is semantically an integer; cast so it prints cleanly and
    # works as a tqdm total later.
    frame_count = int(vid_capture.get(cv2.CAP_PROP_FRAME_COUNT))
    print('Frame count : ', frame_count)
In [40]:
# Output frames must match the input dimensions for the VideoWriter below.
width = int(vid_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
frame_size = (width, height)
In [41]:
frame_size
Out[41]:
In [44]:
# Writer for the rendered video. `fps` and `frame_size` come from the input
# capture above, so the output matches the source timing and dimensions.
output = cv2.VideoWriter(
    '/data/output.mp4',
    # see http://mp4ra.org/#/codecs for codecs
    # cv2.VideoWriter_fourcc('m','p','4','v'),
    # cv2.VideoWriter_fourcc(*'mp4v'),
    # cv2.VideoWriter_fourcc('a','v','1','C'),
    # vp09 chosen after the alternatives above; presumably for in-browser
    # playback of the .mp4 — confirm the codec is available in this build.
    cv2.VideoWriter_fourcc(*'vp09'),
    # cv2.VideoWriter_fourcc(*'avc1'),
    fps,
    frame_size)
In [45]:
# See also:
# with tqdm.wrapattr(file_obj, "read", total=file_obj.size) as fobj:
# ... while True:
# ... chunk = fobj.read(chunk_size)
# ... if not chunk:
# ... break
# Run OpenPose over every frame of `vid_capture` and append the rendered
# result to `output`. Fixes vs. the original:
#  - progress bar advances AFTER a frame is processed (the old t.update(1)
#    before read() also counted the final, failed read — off by one),
#  - `with tqdm(...)` guarantees the bar is closed even if a frame raises,
#  - `if not ret` instead of the `ret == True` anti-idiom.
with tqdm(total=int(frame_count)) as progress:
    while vid_capture.isOpened():
        # read() returns (ok, frame); ok is False once the stream ends
        ret, frame = vid_capture.read()
        if not ret:
            print("Stream ended")
            break
        datum = op.Datum()
        datum.cvInputData = frame
        opWrapper.emplaceAndPop(op.VectorDatum([datum]))
        # cvOutputData is the frame with the detected keypoints rendered on it
        output.write(datum.cvOutputData)
        progress.update(1)
In [46]:
# Release the objects
vid_capture.release()
output.release()
In [47]:
# display(HTML("""<video width="100" height="100" controls><source src="/data/output.mp4" type="video/mp4"></video>"""))
# Bug fix: the VideoWriter above saved to '/data/output.mp4' (no trailing
# "s"); the original read '/data/outputs.mp4', which does not exist.
ipywidgets.Video.from_file('/data/output.mp4')
Out[47]:
In [ ]:
# from IPython.display import Video
# Video('/data/outputs.mp4', embed=True)
In [ ]: