# Copyright (c) OpenMMLab. All rights reserved.
import os
import warnings
from argparse import ArgumentParser

import cv2

from mmpose.apis import (inference_top_down_pose_model, init_pose_model,
                         vis_pose_tracking_result)
from mmpose.datasets import DatasetInfo

try:
    from mmtrack.apis import inference_mot
    from mmtrack.apis import init_model as init_tracking_model
    has_mmtrack = True
except (ImportError, ModuleNotFoundError):
    has_mmtrack = False


def process_mmtracking_results(mmtracking_results):
    """Process mmtracking results.

    :param mmtracking_results: the tracking results returned by
        `inference_mot`
    :return: a list of tracked bounding boxes
    """
    person_results = []
    # 'track_results' is changed to 'track_bboxes'
    # in https://github.com/open-mmlab/mmtracking/pull/300
    if 'track_bboxes' in mmtracking_results:
        tracking_results = mmtracking_results['track_bboxes'][0]
    elif 'track_results' in mmtracking_results:
        tracking_results = mmtracking_results['track_results'][0]

    for track in tracking_results:
        person = {}
        person['track_id'] = int(track[0])
        person['bbox'] = track[1:]
        person_results.append(person)

    return person_results
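

# For reference, a minimal sketch of the data handled above (the exact keys
# and the [track_id, x1, y1, x2, y2, score] row layout are assumptions
# inferred from how `inference_mot` results are consumed in this script):
#
#   mmtracking_results = {
#       'track_bboxes': [np.array([[0, 10., 20., 50., 80., 0.9]])]}
#   process_mmtracking_results(mmtracking_results)
#   -> [{'track_id': 0, 'bbox': np.array([10., 20., 50., 80., 0.9])}]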


def main():
    """Visualize the demo video.

    Using mmtracking to detect and track humans.
    """
    parser = ArgumentParser()
    parser.add_argument('tracking_config', help='Config file for tracking')
    parser.add_argument('pose_config', help='Config file for pose')
    parser.add_argument('pose_checkpoint', help='Checkpoint file for pose')
    parser.add_argument('--video-path', type=str, help='Video path')
    parser.add_argument(
        '--show',
        action='store_true',
        default=False,
        help='whether to show visualizations.')
    parser.add_argument(
        '--out-video-root',
        default='',
        help='Root of the output video file. '
        'Default not saving the visualization video.')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--bbox-thr',
        type=float,
        default=0.3,
        help='Bounding box score threshold')
    parser.add_argument(
        '--kpt-thr', type=float, default=0.3, help='Keypoint score threshold')
    parser.add_argument(
        '--radius',
        type=int,
        default=4,
        help='Keypoint radius for visualization')
    parser.add_argument(
        '--thickness',
        type=int,
        default=1,
        help='Link thickness for visualization')

    assert has_mmtrack, 'Please install mmtrack to run the demo.'

    args = parser.parse_args()

    assert args.show or (args.out_video_root != '')
    assert args.tracking_config is not None

    tracking_model = init_tracking_model(
        args.tracking_config, None, device=args.device.lower())

    # build the pose model from a config file and a checkpoint file
    pose_model = init_pose_model(
        args.pose_config, args.pose_checkpoint, device=args.device.lower())

    dataset = pose_model.cfg.data['test']['type']
    dataset_info = pose_model.cfg.data['test'].get('dataset_info', None)
    if dataset_info is None:
        warnings.warn(
            'Please set `dataset_info` in the config. '
            'Check https://github.com/open-mmlab/mmpose/pull/663 for details.',
            DeprecationWarning)
    else:
        dataset_info = DatasetInfo(dataset_info)

    cap = cv2.VideoCapture(args.video_path)
    assert cap.isOpened(), f'Failed to load video file {args.video_path}'

    if args.out_video_root == '':
        save_out_video = False
    else:
        os.makedirs(args.out_video_root, exist_ok=True)
        save_out_video = True

    if save_out_video:
        fps = cap.get(cv2.CAP_PROP_FPS)
        size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        videoWriter = cv2.VideoWriter(
            os.path.join(args.out_video_root,
                         f'vis_{os.path.basename(args.video_path)}'), fourcc,
            fps, size)

    # optional
    return_heatmap = False

    # e.g. use ('backbone', ) to return backbone feature
    output_layer_names = None

    frame_id = 0
    while (cap.isOpened()):
        flag, img = cap.read()
        if not flag:
            break

        mmtracking_results = inference_mot(
            tracking_model, img, frame_id=frame_id)

        # keep the person class bounding boxes.
        person_results = process_mmtracking_results(mmtracking_results)

        # test a single image, with a list of bboxes.
        pose_results, returned_outputs = inference_top_down_pose_model(
            pose_model,
            img,
            person_results,
            bbox_thr=args.bbox_thr,
            format='xyxy',
            dataset=dataset,
            dataset_info=dataset_info,
            return_heatmap=return_heatmap,
            outputs=output_layer_names)

        # show the results
        vis_img = vis_pose_tracking_result(
            pose_model,
            img,
            pose_results,
            radius=args.radius,
            thickness=args.thickness,
            dataset=dataset,
            dataset_info=dataset_info,
            kpt_score_thr=args.kpt_thr,
            show=False)

        if args.show:
            cv2.imshow('Image', vis_img)

        if save_out_video:
            videoWriter.write(vis_img)

        if args.show and cv2.waitKey(1) & 0xFF == ord('q'):
            break

        frame_id += 1

    cap.release()
    if save_out_video:
        videoWriter.release()
    if args.show:
        cv2.destroyAllWindows()


if __name__ == '__main__':
    main()
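

# Example invocation (a sketch; the script name and all paths below are
# placeholders, not shipped files; substitute your own mmtracking config,
# mmpose config/checkpoint, and video):
#
#   python top_down_pose_tracking_demo_with_mmtracking.py \
#       ${MMTRACKING_CONFIG_FILE} \
#       ${MMPOSE_CONFIG_FILE} ${MMPOSE_CHECKPOINT_FILE} \
#       --video-path ${VIDEO_FILE} \
#       --out-video-root vis_results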