Compare commits


2 commits

Author            SHA1        Message                                 Date
Ruben van de Ven  cbab35e6d3  prototype for live tracking             2023-04-24 18:20:35 +02:00
Ruben van de Ven  7c3433e456  save to temp folder, in higher quality  2023-04-24 18:20:22 +02:00
3 changed files with 114 additions and 33 deletions

demo.py (13 changed lines)
View file

@@ -31,6 +31,8 @@ from utils.log import logger
from utils.timer import Timer
from utils.parse_config import parse_model_cfg
import utils.datasets as datasets
from pathlib import Path
import tempfile
from track import eval_seq
@@ -49,6 +51,8 @@ def track(opt):
n_frame = 0
logger.info('Starting tracking...')
with tempfile.TemporaryDirectory() as tmpdirname:
if os.path.isdir(opt.input_video):
print('Use image sequence')
dataloader = datasets.LoadImages(opt.input_video, opt.img_size)
@@ -58,16 +62,17 @@
frame_rate = dataloader.frame_rate
result_filename = os.path.join(result_root, 'results.txt')
frame_dir = None if opt.output_format=='text' else osp.join(result_root, 'frame')
frame_dir = None if opt.output_format=='text' else tmpdirname
try:
eval_seq(opt, dataloader, 'mot', result_filename,
save_dir=frame_dir, show_image=False, frame_rate=frame_rate)
save_dir=frame_dir, show_image=False, save_img=True, frame_rate=frame_rate)
except Exception as e:
logger.info(e)
if opt.output_format == 'video':
output_video_path = osp.join(result_root, 'result.mp4')
cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(osp.join(result_root, 'frame'), output_video_path)
name = 'result-' + Path(opt.input_video).stem + '.mp4'
output_video_path = osp.join(result_root, name)
cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v mjpeg -q:v 1 {}'.format(frame_dir, output_video_path)
os.system(cmd_str)
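
Taken together, the demo.py hunks write the rendered frames into a tempfile.TemporaryDirectory instead of a permanent 'frame' folder, name the output video after the input file, and re-encode the frames with ffmpeg rather than stream-copying them. The sketch below isolates that pattern; frames_to_video and render_frames are illustrative stand-ins (the diff does this inline around eval_seq and opt.input_video), so treat it as a minimal sketch rather than the file's actual structure.

    import os
    import os.path as osp
    import tempfile
    from pathlib import Path

    def frames_to_video(input_video, result_root, render_frames):
        """Write numbered JPEG frames to a temporary folder, then mux them with ffmpeg.

        The temporary folder (and the intermediate JPEGs) is deleted when the
        `with` block exits, so only the final .mp4 remains in result_root.
        """
        with tempfile.TemporaryDirectory() as tmpdirname:
            render_frames(tmpdirname)  # expected to write 00000.jpg, 00001.jpg, ...
            name = 'result-' + Path(input_video).stem + '.mp4'
            output_video_path = osp.join(result_root, name)
            # '-c:v mjpeg -q:v 1' re-encodes the frames as MJPEG at the encoder's
            # best quality setting (1 = best, 31 = worst), replacing the earlier
            # '-c:v copy'.
            cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v mjpeg -q:v 1 {}'.format(
                tmpdirname, output_video_path)
            os.system(cmd_str)

Note that in the diff the except branch only logs the exception, so the ffmpeg step still runs even if eval_seq fails part-way through the sequence.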

View file

@@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "code",
"execution_count": 47,
"execution_count": 64,
"id": "d1489f9f-328c-4812-9cdb-0a2dee44ae88",
"metadata": {},
"outputs": [],
@@ -28,7 +28,7 @@
},
{
"cell_type": "code",
"execution_count": 48,
"execution_count": 65,
"id": "edec1b34-64ad-4610-856a-68d886a45142",
"metadata": {},
"outputs": [],
@@ -38,7 +38,7 @@
},
{
"cell_type": "code",
"execution_count": 49,
"execution_count": 66,
"id": "010bf567-8845-46d4-8500-883efce2d010",
"metadata": {},
"outputs": [],
@@ -51,7 +51,7 @@
},
{
"cell_type": "code",
"execution_count": 50,
"execution_count": 67,
"id": "8a413424-13c4-4bdc-825a-0aa6164e89e2",
"metadata": {},
"outputs": [],
@@ -67,7 +67,7 @@
},
{
"cell_type": "code",
"execution_count": 51,
"execution_count": 68,
"id": "7b291b67-93ad-4b51-934a-dbaf095f7704",
"metadata": {},
"outputs": [],
@@ -103,7 +103,28 @@
},
{
"cell_type": "code",
"execution_count": 52,
"execution_count": 77,
"id": "c87f2b73-6109-4509-a0ce-62a690c44030",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'./OUT/embedding_test/track-test'"
]
},
"execution_count": 77,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"result_frame_path"
]
},
{
"cell_type": "code",
"execution_count": 69,
"id": "648faf4b-d692-473a-a99d-06b50a2e2261",
"metadata": {},
"outputs": [],
@@ -140,7 +161,7 @@
},
{
"cell_type": "code",
"execution_count": 53,
"execution_count": 70,
"id": "a28ef404-2031-43cf-aeb1-357aa1be0934",
"metadata": {},
"outputs": [],
@@ -158,6 +179,28 @@
"Load video file and get it's properties. Use that to calculate the dimension to fit the loaded model"
]
},
{
"cell_type": "code",
"execution_count": 74,
"id": "eb63b1c4-fdde-48e9-977e-21ceb10c5316",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(2, 512)"
]
},
"execution_count": 74,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# array with shape n-components, n-features\n",
"reducer.components_.shape"
]
},
{
"cell_type": "code",
"execution_count": 59,
@@ -202,7 +245,7 @@
},
{
"cell_type": "code",
"execution_count": 61,
"execution_count": 75,
"id": "c623aa17-5ce2-4948-9adf-d4c9a6d1ccd2",
"metadata": {},
"outputs": [
@@ -212,7 +255,7 @@
"(-1.0, 1.0)"
]
},
"execution_count": 61,
"execution_count": 75,
"metadata": {},
"output_type": "execute_result"
},
@@ -242,7 +285,7 @@
},
{
"cell_type": "code",
"execution_count": 62,
"execution_count": 76,
"id": "e8952235-7e56-4606-858a-a9165b967726",
"metadata": {},
"outputs": [],
@@ -256,7 +299,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 63,
"id": "f9cc6fd1-b9c2-4303-a21c-a193c6045526",
"metadata": {},
"outputs": [
@@ -273,6 +316,17 @@
},
"metadata": {},
"output_type": "display_data"
},
{
"ename": "error",
"evalue": "OpenCV(4.7.0) /io/opencv/modules/imgproc/src/resize.cpp:4062: error: (-215:Assertion failed) !ssize.empty() in function 'resize'\n",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31merror\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[63], line 10\u001b[0m\n\u001b[1;32m 8\u001b[0m ret, frame \u001b[38;5;241m=\u001b[39m stream\u001b[38;5;241m.\u001b[39mread()\n\u001b[1;32m 9\u001b[0m \u001b[38;5;66;03m# scale down/up frame to fit tracker\u001b[39;00m\n\u001b[0;32m---> 10\u001b[0m frame \u001b[38;5;241m=\u001b[39m \u001b[43mcv2\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mresize\u001b[49m\u001b[43m(\u001b[49m\u001b[43mframe\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m(\u001b[49m\u001b[43mw\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mh\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 13\u001b[0m \u001b[38;5;66;03m# letterbox as to have a constant size for the model\u001b[39;00m\n\u001b[1;32m 14\u001b[0m img, _, _, _ \u001b[38;5;241m=\u001b[39m datasets\u001b[38;5;241m.\u001b[39mletterbox(frame, height\u001b[38;5;241m=\u001b[39mopt\u001b[38;5;241m.\u001b[39mimg_size[\u001b[38;5;241m1\u001b[39m], width\u001b[38;5;241m=\u001b[39mopt\u001b[38;5;241m.\u001b[39mimg_size[\u001b[38;5;241m0\u001b[39m])\n",
"\u001b[0;31merror\u001b[0m: OpenCV(4.7.0) /io/opencv/modules/imgproc/src/resize.cpp:4062: error: (-215:Assertion failed) !ssize.empty() in function 'resize'\n"
]
}
],
"source": [
@@ -366,10 +420,31 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 78,
"id": "8f21e642-2320-4a7e-a0ea-af32e8d1a182",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"ffmpeg version 4.3 Copyright (c) 2000-2020 the FFmpeg developers\n",
" built with gcc 7.3.0 (crosstool-NG 1.23.0.449-a04d0)\n",
" configuration: --prefix=/opt/conda/conda-bld/ffmpeg_1597178665428/_h_env_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placehold_placeh --cc=/opt/conda/conda-bld/ffmpeg_1597178665428/_build_env/bin/x86_64-conda_cos6-linux-gnu-cc --disable-doc --disable-openssl --enable-avresample --enable-gnutls --enable-hardcoded-tables --enable-libfreetype --enable-libopenh264 --enable-pic --enable-pthreads --enable-shared --disable-static --enable-version3 --enable-zlib --enable-libmp3lame\n",
" libavutil 56. 51.100 / 56. 51.100\n",
" libavcodec 58. 91.100 / 58. 91.100\n",
" libavformat 58. 45.100 / 58. 45.100\n",
" libavdevice 58. 10.100 / 58. 10.100\n",
" libavfilter 7. 85.100 / 7. 85.100\n",
" libavresample 4. 0. 0 / 4. 0. 0\n",
" libswscale 5. 7.100 / 5. 7.100\n",
" libswresample 3. 7.100 / 3. 7.100\n",
"\u001b[1;31mUnrecognized option 'crf'.\n",
"\u001b[0m\u001b[4;31mError splitting the argument list: \u001b[0m\u001b[4;31mOption not found\n",
"\u001b[0m"
]
}
],
"source": [
"! ffmpeg -i OUT/embedding_test/track-test/%04d.png -c:v libx264 -crf 10 OUT/embedding_test/track-test.mp4"
]
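
Two of the new notebook outputs record failures rather than results. The cv2.resize assertion "(-215:Assertion failed) !ssize.empty()" is what OpenCV raises when VideoCapture.read() hands back an empty frame, typically because the stream has ended or never opened; the usual guard is to check the return flag before resizing. In the sketch below only stream.read(), the resize call, and the scaling comment come from the traceback; the path, frame size, and loop structure are illustrative.

    import cv2

    # Illustrative values; in the notebook the target size comes from the model
    # options and the capture from a video under OUT/ (exact path not shown).
    w, h = 640, 360
    stream = cv2.VideoCapture('OUT/embedding_test/input.mp4')  # hypothetical path

    while True:
        ret, frame = stream.read()
        if not ret or frame is None:
            # End of stream or a failed grab: resizing an empty frame is what
            # triggers the "!ssize.empty()" assertion seen in the traceback above.
            break
        # scale down/up frame to fit tracker
        frame = cv2.resize(frame, (w, h))
        # ... letterboxing and tracker.update() would follow here ...

    stream.release()

The second failure, ffmpeg's "Unrecognized option 'crf'", is most likely down to the conda-packaged ffmpeg 4.3 shown in the output: its configuration includes libopenh264 but not libx264, so the libx264-specific -crf option is unknown to it. Running the cell against an ffmpeg built with libx264, or dropping -crf in favour of options this build supports, should get past the "Error splitting the argument list" stage.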

View file

@@ -100,7 +100,8 @@ def eval_seq(opt, dataloader, data_type, result_filename, save_dir=None, save_im
online_targets, frame_embeddings = tracker.update(blob, img0)
online_tlwhs = []
online_ids = []
for t in online_targets:
# for t in online_targets:
for t in tracker.tracked_stracks:
tlwh = t.tlwh
tid = t.track_id
vertical = tlwh[2] / tlwh[3] > 1.6
@@ -118,7 +119,7 @@
if save_dir is not None:
base_fn = os.path.join(save_dir, '{:05d}'.format(frame_id))
if save_img:
cv2.imwrite(base_fn+'.jpg', online_im)
cv2.imwrite(base_fn+'.jpg', online_im, [cv2.IMWRITE_JPEG_QUALITY, 100])
if save_figures:
for i, fe in enumerate(frame_embeddings):
tlwh, curr_feat = fe
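
The eval_seq hunks make two changes: the drawing loop now iterates tracker.tracked_stracks instead of the online_targets returned by tracker.update() (the old line is kept as a comment), and the saved frames are written at JPEG quality 100 rather than OpenCV's default of 95, which fits the "higher quality" commit message since these are the frames ffmpeg later re-encodes. In the JDE-style tracker this code builds on, tracked_stracks appears to also include tracks that are tracked but not yet activated, so the change presumably draws candidate tracks earlier. Below is a minimal sketch of the filtering loop, assuming STrack-like objects with tlwh and track_id as in the hunk; whatever further filtering follows in the original file is not visible here and is left out.

    def collect_boxes(tracked_stracks, aspect_thresh=1.6):
        """Collect (tlwh, track_id) pairs to draw, mirroring the loop above.

        tracked_stracks: the tracker's list of currently tracked STrack objects.
        Boxes wider than aspect_thresh times their height are skipped, matching
        the 'vertical' check shown in the hunk.
        """
        online_tlwhs, online_ids = [], []
        for t in tracked_stracks:
            tlwh = t.tlwh  # bounding box as (top-left x, top-left y, width, height)
            if tlwh[2] / tlwh[3] > aspect_thresh:
                continue  # implausibly wide box for a pedestrian; skip it
            online_tlwhs.append(tlwh)
            online_ids.append(t.track_id)
        return online_tlwhs, online_ids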