openpose-docker-python/app/openpose.ipynb

In [1]:
#hide
import pyopenpose as op
import ipywidgets
from ipywebrtc import CameraStream
import cv2 
 
In [2]:
!pip3 install tqdm
Defaulting to user installation because normal site-packages is not writeable
Requirement already satisfied: tqdm in ./.local/lib/python3.8/site-packages (4.64.1)
In [3]:
from tqdm.notebook import tqdm

Jupyter notebook for OpenPose experiments

Reads any image or video from the /data directory and renders the OpenPose output inline.

In [4]:
!nvidia-smi
Wed Nov  2 15:54:10 2022       
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 470.141.03   Driver Version: 470.141.03   CUDA Version: 11.4     |
|-------------------------------+----------------------+----------------------+
| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
|                               |                      |               MIG M. |
|===============================+======================+======================|
|   0  NVIDIA GeForce ...  On   | 00000000:08:00.0 Off |                  N/A |
|  0%   41C    P5    61W / 350W |     21MiB / 24265MiB |      0%      Default |
|                               |                      |                  N/A |
+-------------------------------+----------------------+----------------------+
                                                                               
+-----------------------------------------------------------------------------+
| Processes:                                                                  |
|  GPU   GI   CI        PID   Type   Process name                  GPU Memory |
|        ID   ID                                                   Usage      |
|=============================================================================|
+-----------------------------------------------------------------------------+
In [5]:
# CameraStream.facing_user(audio=False)
# disabled: browser camera access via ipywebrtc requires the notebook to be served over HTTPS
In [6]:
params = dict()
params["model_folder"] = "/openpose/models/"
# params["face"] = True
params["hand"] = True
# params["heatmaps_add_parts"] = True
# params["heatmaps_add_bkg"] = True
# params["heatmaps_add_PAFs"] = True
# params["heatmaps_scale"] = 3
# params["upsampling_ratio"] = 1
# params["body"] = 1
In [7]:
# Starting OpenPose
opWrapper = op.WrapperPython()
opWrapper.configure(params)
opWrapper.start()
Starting OpenPose Python Wrapper...
Auto-detecting all available GPUs... Detected 1 GPU(s), using 1 of them starting at GPU 0.
In [8]:
# upload = ipywidgets.FileUpload()
# display(upload)
In [9]:
# # Process a single image (from the OpenPose Python API tutorial;
# # `args[0].image_path` comes from its argparse setup and is not defined in this notebook)
# datum = op.Datum()
# imageToProcess = cv2.imread(args[0].image_path)
# datum.cvInputData = imageToProcess
# opWrapper.emplaceAndPop(op.VectorDatum([datum]))

# # Display the result
# print("Body keypoints: \n" + str(datum.poseKeypoints))
# print("Face keypoints: \n" + str(datum.faceKeypoints))
# print("Left hand keypoints: \n" + str(datum.handKeypoints[0]))
# print("Right hand keypoints: \n" + str(datum.handKeypoints[1]))
# cv2.imwrite("/data/result_body.jpg", datum.cvOutputData)

# print(dir(datum))
# cv2.imshow("OpenPose 1.7.0 - Tutorial Python API", datum.cvOutputData)
# cv2.waitKey(0)
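
A runnable version of the commented-out cell above, using the opWrapper that is already started (the image path /data/example.jpg is a placeholder; substitute any image under /data):

In [ ]:
# Process a single image with the already-started opWrapper.
# NOTE: /data/example.jpg is a placeholder file name.
datum = op.Datum()
imageToProcess = cv2.imread("/data/example.jpg")
datum.cvInputData = imageToProcess
opWrapper.emplaceAndPop(op.VectorDatum([datum]))

print("Body keypoints:\n" + str(datum.poseKeypoints))
cv2.imwrite("/data/result_body.jpg", datum.cvOutputData)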
In [39]:
# Create a video capture object, in this case reading the video from a file
vid_capture = cv2.VideoCapture('/data/0001-0500.mp4')

if not vid_capture.isOpened():
  print("Error opening the video file")
else:
  # Get the frame rate (5 is the numeric value of the CAP_PROP_FPS enum)
  fps = vid_capture.get(5)
  print('Frames per second : ', fps, 'FPS')

  # Get the frame count (7 is the numeric value of the CAP_PROP_FRAME_COUNT enum)
  frame_count = vid_capture.get(7)
  print('Frame count : ', frame_count)
Frames per second :  24.0 FPS
Frame count :  500.0
In [40]:
frame_size = (int(vid_capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
              int(vid_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
In [41]:
frame_size
Out[41]:
(1920, 1080)
In [44]:
output = cv2.VideoWriter(
    '/data/output.mp4',
    # see http://mp4ra.org/#/codecs for codecs
#     cv2.VideoWriter_fourcc('m','p','4','v'),
#      cv2.VideoWriter_fourcc(*'mp4v'),
#     cv2.VideoWriter_fourcc('a','v','1','C'),
    cv2.VideoWriter_fourcc(*'vp09'),
#     cv2.VideoWriter_fourcc(*'avc1'),
    fps,
    frame_size)
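
cv2.VideoWriter can fail silently when the requested fourcc/container combination is not supported by the local OpenCV/FFmpeg build, so a quick sanity check is worthwhile (not part of the original notebook):

In [ ]:
# VideoWriter.isOpened() is False when the codec above could not be initialised.
assert output.isOpened(), "VideoWriter failed to open /data/output.mp4 with the chosen codec"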
In [45]:
# See also: tqdm.wrapattr for file-like objects, e.g.
#   with tqdm.wrapattr(file_obj, "read", total=file_obj.size) as fobj:
#       while True:
#           chunk = fobj.read(chunk_size)
#           if not chunk:
#               break
# cv2.VideoCapture is not file-like, so a plain progress bar is used instead.
t = tqdm(total=frame_count)  # initialise the progress bar
while vid_capture.isOpened():
    # vid_capture.read() returns a tuple: a success flag and the frame itself
    ret, frame = vid_capture.read()
    if ret:
        datum = op.Datum()
        datum.cvInputData = frame
        opWrapper.emplaceAndPop(op.VectorDatum([datum]))

        # Optionally inspect or dump the detected keypoints:
        # print("Body keypoints: \n" + str(datum.poseKeypoints))
        # print("Face keypoints: \n" + str(datum.faceKeypoints))
        # print("Left hand keypoints: \n" + str(datum.handKeypoints[0]))
        # print("Right hand keypoints: \n" + str(datum.handKeypoints[1]))
        # cv2.imwrite(f"/data/out/result_body_scale{i:03d}.jpg", datum.cvOutputData)

        # Write the rendered frame to the output video and advance the progress bar
        output.write(datum.cvOutputData)
        t.update(1)
    else:
        print("Stream ended")
        break
t.close()
  0%|          | 0/500.0 [00:00<?, ?it/s]
Stream ended
In [46]:
# Release the objects
vid_capture.release()
output.release()
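
If the keypoints themselves are needed rather than (or as well as) the rendered video, the same loop can collect datum.poseKeypoints per frame. A minimal sketch of a second pass, assuming the capture is re-opened first; with the default BODY_25 model each entry has shape (num_people, 25, 3) holding x, y and confidence:

In [ ]:
# Hedged sketch: second pass over the video that collects per-frame body keypoints.
vid_capture = cv2.VideoCapture('/data/0001-0500.mp4')
all_keypoints = []
while vid_capture.isOpened():
    ret, frame = vid_capture.read()
    if not ret:
        break
    datum = op.Datum()
    datum.cvInputData = frame
    opWrapper.emplaceAndPop(op.VectorDatum([datum]))
    # poseKeypoints is None for frames where no person is detected
    all_keypoints.append(datum.poseKeypoints)
vid_capture.release()
print(len(all_keypoints), 'frames processed')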
In [47]:
# display(HTML("""<video width="100" height="100" controls><source src="/data/output.mp4" type="video/mp4"></video>"""))
ipywidgets.Video.from_file('/data/output.mp4')
Out[47]:
Video(value=b'\x00\x00\x00\x1cftypisom\x00\x00\x02\x00isomiso2mp41\x00\x00\x00\x08free\x00U\x0c\xe0...')
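
If the embedded player shows a blank video, the codec is the usual suspect: not every browser will play VP9 inside an .mp4 written by OpenCV. A hedged workaround is to re-encode to H.264 with ffmpeg (assuming ffmpeg is available inside the container; the output file name is arbitrary):

In [ ]:
# Re-encode to H.264 for broad browser support.
# -y overwrites an existing file, -pix_fmt yuv420p keeps older players happy.
!ffmpeg -y -i /data/output.mp4 -vcodec libx264 -pix_fmt yuv420p /data/output_h264.mp4
# ipywidgets.Video.from_file('/data/output_h264.mp4')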
In [ ]:
# from IPython.display import Video
# Video('/data/output.mp4', embed=True)
In [ ]: