You can not select more than 25 topics
Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
689 lines
28 KiB
689 lines
28 KiB
# Copyright (c) 2018-present, Facebook, Inc.
|
|
# All rights reserved.
|
|
#
|
|
# This source code is licensed under the license found in the
|
|
# LICENSE file in the root directory of this source tree.
|
|
#
|
|
|
|
import matplotlib
|
|
|
|
matplotlib.use('Agg')
|
|
|
|
import matplotlib.pyplot as plt
|
|
from matplotlib.animation import FuncAnimation, writers
|
|
from mpl_toolkits.mplot3d import Axes3D
|
|
import numpy as np
|
|
import subprocess as sp
|
|
|
|
|
|
def get_resolution(filename):
    """Probe *filename* with ffprobe and return the first video stream's size.

    Args:
        filename: Path to a video file readable by ffprobe.

    Returns:
        (int, int): width and height in pixels.

    Raises:
        ValueError: if ffprobe produced no resolution output (e.g. the path
            is not a decodable video), instead of silently returning None.
    """
    command = ['ffprobe', '-v', 'error', '-select_streams', 'v:0',
               '-show_entries', 'stream=width,height', '-of', 'csv=p=0', filename]
    with sp.Popen(command, stdout=sp.PIPE, bufsize=-1) as pipe:
        for line in pipe.stdout:
            # Only the first reported "width,height" line matters.
            w, h = line.decode().strip().split(',')
            return int(w), int(h)
    # Previously this fell through and implicitly returned None, which made
    # callers fail later with an opaque unpacking TypeError.
    raise ValueError('could not determine resolution of {!r}'.format(filename))
|
|
|
|
|
|
def get_fps(filename):
    """Probe *filename* with ffprobe and return the first video stream's frame rate.

    ffprobe reports ``r_frame_rate`` as a fraction "num/den" (e.g. "30000/1001"),
    which is evaluated here to a float.

    Args:
        filename: Path to a video file readable by ffprobe.

    Returns:
        float: frames per second.

    Raises:
        ValueError: if ffprobe produced no frame-rate output, instead of
            silently returning None.
    """
    command = ['ffprobe', '-v', 'error', '-select_streams', 'v:0',
               '-show_entries', 'stream=r_frame_rate', '-of', 'csv=p=0', filename]
    with sp.Popen(command, stdout=sp.PIPE, bufsize=-1) as pipe:
        for line in pipe.stdout:
            a, b = line.decode().strip().split('/')
            return int(a) / int(b)
    # Previously this fell through and implicitly returned None, which made
    # arithmetic on the result fail with an opaque TypeError.
    raise ValueError('could not determine frame rate of {!r}'.format(filename))
|
|
|
|
|
|
def read_video(filename, skip=0, limit=-1):
    """Yield raw RGB frames of *filename* decoded through an ffmpeg pipe.

    Frames are counted 1-based; the first *skip* frames are discarded and,
    when ``limit != -1``, frames past *limit* are read but not yielded (the
    pipe is still drained to completion).

    Args:
        filename: path to a video file readable by ffmpeg.
        skip: number of leading frames to drop.
        limit: last frame index to yield, or -1 for no limit.

    Yields:
        np.ndarray of shape (height, width, 3), dtype uint8.
    """
    width, height = get_resolution(filename)
    frame_bytes = width * height * 3  # rgb24: 3 bytes per pixel

    command = ['ffmpeg',
               '-i', filename,
               '-f', 'image2pipe',
               '-pix_fmt', 'rgb24',
               '-vsync', '0',
               '-vcodec', 'rawvideo', '-']

    frame_index = 0
    with sp.Popen(command, stdout=sp.PIPE, bufsize=-1) as pipe:
        while True:
            raw = pipe.stdout.read(frame_bytes)
            if not raw:
                break
            frame_index += 1
            if limit != -1 and frame_index > limit:
                # Past the limit: keep draining the pipe without yielding.
                continue
            if frame_index > skip:
                yield np.frombuffer(raw, dtype='uint8').reshape((height, width, 3))
|
|
|
|
|
|
def downsample_tensor(X, factor):
    """Temporally downsample *X* by averaging consecutive groups of *factor* frames.

    Trailing frames that do not fill a complete group are discarded.

    Args:
        X: array whose first axis is time.
        factor: number of consecutive frames averaged into one.

    Returns:
        Array with first axis of length ``X.shape[0] // factor``.
    """
    usable = (X.shape[0] // factor) * factor
    grouped = X[:usable].reshape(-1, factor, *X.shape[1:])
    return grouped.mean(axis=1)
|
|
|
|
|
|
def render_animation(keypoints, keypoints_metadata, poses, skeleton, fps, bitrate, azim, output, viewport,
                     limit=-1, downsample=1, size=6, input_video_path=None, input_video_skip=0, viz_action="",
                     viz_subject=""):
    """Render 2D input keypoints side by side with one or more 3D pose sequences.

    The leftmost panel shows the input video (or a black background when no
    video is given) with the 2D skeleton overlaid; one 3D subplot is added per
    entry of *poses*. The result is saved to *output* ('.mp4' via ffmpeg or
    '.gif' via imagemagick).

    Args:
        keypoints: array indexed as [frame, joint, coord] of 2D image coords.
        keypoints_metadata: dataset metadata; unused in this variant (the
            left/right joint split below is hard-coded).
        poses: dict mapping subplot title -> array indexed as
            [frame, joint, xyz] of 3D joint positions.
        skeleton: provides parents() (joint hierarchy) and joints_right().
        fps: output frame rate; if None it is probed from the input video.
        bitrate: bitrate for the ffmpeg writer (mp4 only).
        azim: base azimuth of the 3D views (this variant adds 90 degrees).
        output: destination path; must end with '.mp4' or '.gif'.
        viewport: (width, height) of the synthetic black background.
        limit: maximum number of frames to render (-1 / <1 = all available).
        downsample: temporal downsampling factor (frames/poses are averaged).
        size: per-panel figure size in inches.
        input_video_path: optional source video decoded with ffmpeg.
        input_video_skip: number of leading video frames to skip.
        viz_action: unused in this variant.
        viz_subject: unused in this variant.

    Raises:
        ValueError: if *output* has an unsupported extension.
    """

    plt.ioff()
    # One column for the input video plus one per 3D pose sequence.
    fig = plt.figure(figsize=(size * (1 + len(poses)), size))
    ax_in = fig.add_subplot(1, 1 + len(poses), 1)
    ax_in.get_xaxis().set_visible(False)
    ax_in.get_yaxis().set_visible(False)
    ax_in.set_axis_off()
    ax_in.set_title('Input')

    ax_3d = []          # one 3D axes per pose sequence
    lines_3d = []       # per-axes list of plotted bone line artists
    trajectories = []   # per-sequence root (joint 0) xy path, used to re-center the view
    radius = 1.7        # half-extent of the 3D viewing cube, in world units
    for index, (title, data) in enumerate(poses.items()):
        ax = fig.add_subplot(1, 1 + len(poses), index + 2, projection='3d')
        ax.view_init(elev=15., azim=azim+90.)  # NOTE: this variant rotates the view by +90 degrees
        ax.set_xlim3d([-radius / 2, radius / 2])
        ax.set_zlim3d([0, radius])
        ax.set_ylim3d([-radius / 2, radius / 2])
        # ax.set_aspect('equal')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_zticklabels([])
        ax.dist = 7.5
        ax.set_title(title)  # , pad=35
        ax_3d.append(ax)
        lines_3d.append([])
        trajectories.append(data[:, 0, [0, 1]])
    poses = list(poses.values())

    # Decode video
    if input_video_path is None:
        # Black background
        all_frames = np.zeros((keypoints.shape[0], viewport[1], viewport[0]), dtype='uint8')
    else:
        # Load video using ffmpeg
        all_frames = []
        for f in read_video(input_video_path, skip=input_video_skip, limit=limit):
            all_frames.append(f)
        # Keep only as many frames as we have keypoints for (and vice versa).
        effective_length = min(keypoints.shape[0], len(all_frames))
        all_frames = all_frames[:effective_length]

        keypoints = keypoints[input_video_skip:]  # todo remove
        for idx in range(len(poses)):
            poses[idx] = poses[idx][input_video_skip:]

        if fps is None:
            fps = get_fps(input_video_path)

    if downsample > 1:
        # Average consecutive groups of `downsample` frames and scale fps to match.
        keypoints = downsample_tensor(keypoints, downsample)
        all_frames = downsample_tensor(np.array(all_frames), downsample).astype('uint8')
        for idx in range(len(poses)):
            poses[idx] = downsample_tensor(poses[idx], downsample)
            trajectories[idx] = downsample_tensor(trajectories[idx], downsample)
        fps /= downsample

    # Artists are created lazily on the first animation frame, then updated in place.
    initialized = False
    image = None
    lines = []
    points = None

    if limit < 1:
        limit = len(all_frames)
    else:
        limit = min(limit, len(all_frames))

    parents = skeleton.parents()

    def update_video(i):
        # Draw (first call) or update in place (subsequent calls) all artists for frame i.
        nonlocal initialized, image, lines, points

        # Re-center each 3D view on the root trajectory of its sequence.
        for n, ax in enumerate(ax_3d):
            ax.set_xlim3d([-radius / 2 + trajectories[n][i, 0], radius / 2 + trajectories[n][i, 0]])
            ax.set_ylim3d([-radius / 2 + trajectories[n][i, 1], radius / 2 + trajectories[n][i, 1]])

        # Update 2D poses
        # joints_right_2d = keypoints_metadata['keypoints_symmetry'][1]
        # joints_left_2d = keypoints_metadata['keypoints_symmetry'][0]
        # Left/right split hard-coded here (looks like a 17-joint H3.6M-style
        # layout -- TODO confirm against the keypoints actually passed in).
        joints_left_2d = [4, 5, 6, 11, 12, 13]
        joints_right_2d = [1, 2, 3, 14, 15, 16]
        colors_2d = np.full(keypoints.shape[1], 'midnightblue', dtype="object")
        colors_2d[joints_right_2d] = 'yellowgreen'
        colors_2d[joints_left_2d] = 'midnightblue'
        if not initialized:
            image = ax_in.imshow(all_frames[i], aspect='equal')

            for j, j_parent in enumerate(parents):
                if j_parent == -1:
                    # Root joint: no parent bone to draw.
                    continue

                # if len(parents) == keypoints.shape[1] and keypoints_metadata['layout_name'] != 'coco':
                if len(parents) == keypoints.shape[1]:
                    # Draw skeleton only if keypoints match (otherwise we don't have the parents definition)
                    lines.append(ax_in.plot([keypoints[i, j, 0], keypoints[i, j_parent, 0]],
                                            [keypoints[i, j, 1], keypoints[i, j_parent, 1]], color=colors_2d[j]))

                col = 'red' if j in skeleton.joints_right() else 'black'  # NOTE(review): computed but never used
                for n, ax in enumerate(ax_3d):
                    pos = poses[n][i]
                    lines_3d[n].append(ax.plot([pos[j, 0], pos[j_parent, 0]],
                                               [pos[j, 1], pos[j_parent, 1]],
                                               [pos[j, 2], pos[j_parent, 2]], zdir='z', c=colors_2d[j]))

            # points = ax_in.scatter(*keypoints[i].T, 0, zorder=10)

            initialized = True
        else:
            image.set_data(all_frames[i])

            for j, j_parent in enumerate(parents):
                if j_parent == -1:
                    continue

                # if len(parents) == keypoints.shape[1] and keypoints_metadata['layout_name'] != 'coco':
                if len(parents) == keypoints.shape[1]:
                    # Index j - 1: one line artist was appended per non-root joint, in joint order.
                    lines[j - 1][0].set_data([keypoints[i, j, 0], keypoints[i, j_parent, 0]],
                                             [keypoints[i, j, 1], keypoints[i, j_parent, 1]])

                for n, ax in enumerate(ax_3d):
                    pos = poses[n][i]
                    lines_3d[n][j - 1][0].set_xdata([pos[j, 0], pos[j_parent, 0]])
                    lines_3d[n][j - 1][0].set_ydata([pos[j, 1], pos[j_parent, 1]])
                    lines_3d[n][j - 1][0].set_3d_properties([pos[j, 2], pos[j_parent, 2]], zdir='z')

            # points.set_offsets(keypoints[i])

        # Progress indicator, overwritten in place via carriage return.
        print('{}/{} '.format(i, limit), end='\r')

    fig.tight_layout()

    anim = FuncAnimation(fig, update_video, frames=np.arange(0, limit), interval=1000 / fps, repeat=False)
    if output.endswith('.mp4'):
        Writer = writers['ffmpeg']
        writer = Writer(fps=fps, metadata={}, bitrate=bitrate)
        anim.save(output, writer=writer)
    elif output.endswith('.gif'):
        anim.save(output, dpi=80, writer='imagemagick')
    else:
        raise ValueError('Unsupported output format (only .mp4 and .gif are supported)')
    plt.close()
|
|
|
|
|
|
def render_animation_temp(keypoints, keypoints_metadata, poses, skeleton, fps, bitrate, azim, output, viewport,
                          limit=-1, downsample=1, size=6, input_video_path=None, input_video_skip=0, viz_action="",
                          viz_subject=""):
    """Variant of render_animation that also draws 2D keypoint markers.

    Differences from render_animation: the output filename is derived as
    ``output + "_" + viz_subject + "_" + viz_action + ".mp4"``; the 3D views
    use *azim* directly (no +90 offset); the left/right joint split is taken
    from ``keypoints_metadata['keypoints_symmetry']``; and a scatter of the
    2D keypoints is drawn and updated every frame.

    Args:
        keypoints: array indexed as [frame, joint, coord] of 2D image coords.
        keypoints_metadata: must contain 'keypoints_symmetry' as
            (left_joint_indices, right_joint_indices).
        poses: dict mapping subplot title -> [frame, joint, xyz] 3D positions.
        skeleton: provides parents() and joints_right().
        fps: output frame rate; if None it is probed from the input video.
        bitrate: bitrate for the ffmpeg writer (mp4 only).
        azim: azimuth of the 3D views.
        output: output path *prefix*; "_<subject>_<action>.mp4" is appended.
        viewport: (width, height) of the synthetic black background.
        limit: maximum number of frames to render (-1 / <1 = all available).
        downsample: temporal downsampling factor.
        size: per-panel figure size in inches.
        input_video_path: optional source video decoded with ffmpeg.
        input_video_skip: number of leading video frames to skip.
        viz_action: action name embedded in the output filename.
        viz_subject: subject name embedded in the output filename.

    Raises:
        ValueError: if the derived output path has an unsupported extension
            (cannot happen here since ".mp4" is always appended).
    """

    # Derive the actual output filename from the prefix + subject + action.
    output = output + "_" + viz_subject + "_" + viz_action + ".mp4"
    print(output)

    plt.ioff()
    # One column for the input video plus one per 3D pose sequence.
    fig = plt.figure(figsize=(size * (1 + len(poses)), size))
    ax_in = fig.add_subplot(1, 1 + len(poses), 1)
    ax_in.get_xaxis().set_visible(False)
    ax_in.get_yaxis().set_visible(False)
    ax_in.set_axis_off()
    ax_in.set_title('Input')

    ax_3d = []          # one 3D axes per pose sequence
    lines_3d = []       # per-axes list of plotted bone line artists
    trajectories = []   # per-sequence root (joint 0) xy path, used to re-center the view
    radius = 1.7        # half-extent of the 3D viewing cube, in world units
    for index, (title, data) in enumerate(poses.items()):
        ax = fig.add_subplot(1, 1 + len(poses), index + 2, projection='3d')
        ax.view_init(elev=15., azim=azim)
        ax.set_xlim3d([-radius / 2, radius / 2])
        ax.set_zlim3d([0, radius])
        ax.set_ylim3d([-radius / 2, radius / 2])
        # ax.set_aspect('equal')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_zticklabels([])
        ax.dist = 7.5
        ax.set_title(title)  # , pad=35
        ax_3d.append(ax)
        lines_3d.append([])
        trajectories.append(data[:, 0, [0, 1]])
    poses = list(poses.values())

    # Decode video
    if input_video_path is None:
        # Black background
        all_frames = np.zeros((keypoints.shape[0], viewport[1], viewport[0]), dtype='uint8')
    else:
        # Load video using ffmpeg
        all_frames = []
        for f in read_video(input_video_path, skip=input_video_skip, limit=limit):
            all_frames.append(f)
        # Keep only as many frames as we have keypoints for (and vice versa).
        effective_length = min(keypoints.shape[0], len(all_frames))
        all_frames = all_frames[:effective_length]

        keypoints = keypoints[input_video_skip:]  # todo remove
        for idx in range(len(poses)):
            poses[idx] = poses[idx][input_video_skip:]

        if fps is None:
            fps = get_fps(input_video_path)

    if downsample > 1:
        # Average consecutive groups of `downsample` frames and scale fps to match.
        keypoints = downsample_tensor(keypoints, downsample)
        all_frames = downsample_tensor(np.array(all_frames), downsample).astype('uint8')
        for idx in range(len(poses)):
            poses[idx] = downsample_tensor(poses[idx], downsample)
            trajectories[idx] = downsample_tensor(trajectories[idx], downsample)
        fps /= downsample

    # Artists are created lazily on the first animation frame, then updated in place.
    initialized = False
    image = None
    lines = []
    points = None

    if limit < 1:
        limit = len(all_frames)
    else:
        limit = min(limit, len(all_frames))

    parents = skeleton.parents()

    def update_video(i):
        # Draw (first call) or update in place (subsequent calls) all artists for frame i.
        nonlocal initialized, image, lines, points

        # Re-center each 3D view on the root trajectory of its sequence.
        for n, ax in enumerate(ax_3d):
            ax.set_xlim3d([-radius / 2 + trajectories[n][i, 0], radius / 2 + trajectories[n][i, 0]])
            ax.set_ylim3d([-radius / 2 + trajectories[n][i, 1], radius / 2 + trajectories[n][i, 1]])

        # Update 2D poses
        joints_right_2d = keypoints_metadata['keypoints_symmetry'][1]
        joints_left_2d = keypoints_metadata['keypoints_symmetry'][0]
        colors_2d = np.full(keypoints.shape[1], 'peru', dtype="object")
        colors_2d[joints_right_2d] = 'darkseagreen'
        colors_2d[joints_left_2d] = 'slateblue'
        if not initialized:
            image = ax_in.imshow(all_frames[i], aspect='equal')

            for j, j_parent in enumerate(parents):
                if j_parent == -1:
                    # Root joint: no parent bone to draw.
                    continue

                # if len(parents) == keypoints.shape[1] and keypoints_metadata['layout_name'] != 'coco':
                if len(parents) == keypoints.shape[1]:
                    # Draw skeleton only if keypoints match (otherwise we don't have the parents definition)
                    lines.append(ax_in.plot([keypoints[i, j, 0], keypoints[i, j_parent, 0]],
                                            [keypoints[i, j, 1], keypoints[i, j_parent, 1]], color=colors_2d[j]))

                col = 'red' if j in skeleton.joints_right() else 'black'  # NOTE(review): computed but never used
                for n, ax in enumerate(ax_3d):
                    pos = poses[n][i]
                    lines_3d[n].append(ax.plot([pos[j, 0], pos[j_parent, 0]],
                                               [pos[j, 1], pos[j_parent, 1]],
                                               [pos[j, 2], pos[j_parent, 2]], zdir='z', c=colors_2d[j]))

            # Keypoint markers, recolored per left/right side.
            points = ax_in.scatter(*keypoints[i].T, 10, color=colors_2d, edgecolors='white', zorder=10)

            initialized = True
        else:
            image.set_data(all_frames[i])

            for j, j_parent in enumerate(parents):
                if j_parent == -1:
                    continue

                # if len(parents) == keypoints.shape[1] and keypoints_metadata['layout_name'] != 'coco':
                if len(parents) == keypoints.shape[1]:
                    # Index j - 1: one line artist was appended per non-root joint, in joint order.
                    lines[j - 1][0].set_data([keypoints[i, j, 0], keypoints[i, j_parent, 0]],
                                             [keypoints[i, j, 1], keypoints[i, j_parent, 1]])

                for n, ax in enumerate(ax_3d):
                    pos = poses[n][i]
                    lines_3d[n][j - 1][0].set_xdata([pos[j, 0], pos[j_parent, 0]])
                    lines_3d[n][j - 1][0].set_ydata([pos[j, 1], pos[j_parent, 1]])
                    lines_3d[n][j - 1][0].set_3d_properties([pos[j, 2], pos[j_parent, 2]], zdir='z')

            points.set_offsets(keypoints[i])

        # Progress indicator, overwritten in place via carriage return.
        print('{}/{} '.format(i, limit), end='\r')

    fig.tight_layout()

    anim = FuncAnimation(fig, update_video, frames=np.arange(0, limit), interval=1000 / fps, repeat=False)
    if output.endswith('.mp4'):
        Writer = writers['ffmpeg']
        writer = Writer(fps=fps, metadata={}, bitrate=bitrate)
        anim.save(output, writer=writer)
    elif output.endswith('.gif'):
        anim.save(output, dpi=80, writer='imagemagick')
    else:
        raise ValueError('Unsupported output format (only .mp4 and .gif are supported)')
    plt.close()
|
|
|
|
|
|
def render_animation_T(keypoints, keypoints_metadata, poses, skeleton, fps, bitrate, azim, output,
                       viewport,
                       limit=-1, downsample=1, size=6, input_video_path=None, input_video_skip=0, viz_action="",
                       viz_subject=""):
    """Variant of render_animation_temp without the 2D keypoint scatter.

    Behaves like render_animation_temp (output filename derived from subject
    and action, symmetry taken from ``keypoints_metadata``), except the 2D
    keypoint scatter/update calls are commented out.

    Args:
        keypoints: array indexed as [frame, joint, coord] of 2D image coords.
        keypoints_metadata: must contain 'keypoints_symmetry' as
            (left_joint_indices, right_joint_indices).
        poses: dict mapping subplot title -> [frame, joint, xyz] 3D positions.
        skeleton: provides parents() and joints_right().
        fps: output frame rate; if None it is probed from the input video.
        bitrate: bitrate for the ffmpeg writer (mp4 only).
        azim: azimuth of the 3D views.
        output: output path *prefix*; "_<subject>_<action>.mp4" is appended.
        viewport: (width, height) of the synthetic black background.
        limit: maximum number of frames to render (-1 / <1 = all available).
        downsample: temporal downsampling factor.
        size: per-panel figure size in inches.
        input_video_path: optional source video decoded with ffmpeg.
        input_video_skip: number of leading video frames to skip.
        viz_action: action name embedded in the output filename.
        viz_subject: subject name embedded in the output filename.
    """

    # Derive the actual output filename from the prefix + subject + action.
    output = output + "_" + viz_subject + "_" + viz_action + ".mp4"
    print(output)

    plt.ioff()
    # One column for the input video plus one per 3D pose sequence.
    fig = plt.figure(figsize=(size * (1 + len(poses)), size))
    ax_in = fig.add_subplot(1, 1 + len(poses), 1)
    ax_in.get_xaxis().set_visible(False)
    ax_in.get_yaxis().set_visible(False)
    ax_in.set_axis_off()
    ax_in.set_title('Input')

    ax_3d = []          # one 3D axes per pose sequence
    lines_3d = []       # per-axes list of plotted bone line artists
    trajectories = []   # per-sequence root (joint 0) xy path, used to re-center the view
    radius = 1.7        # half-extent of the 3D viewing cube, in world units
    for index, (title, data) in enumerate(poses.items()):
        ax = fig.add_subplot(1, 1 + len(poses), index + 2, projection='3d')
        ax.view_init(elev=15., azim=azim)
        ax.set_xlim3d([-radius / 2, radius / 2])
        ax.set_zlim3d([0, radius])
        ax.set_ylim3d([-radius / 2, radius / 2])
        # ax.set_aspect('equal')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_zticklabels([])
        ax.dist = 7.5
        ax.set_title(title)  # , pad=35
        ax_3d.append(ax)
        lines_3d.append([])
        trajectories.append(data[:, 0, [0, 1]])
    poses = list(poses.values())

    # Decode video
    if input_video_path is None:
        # Black background
        all_frames = np.zeros((keypoints.shape[0], viewport[1], viewport[0]), dtype='uint8')
    else:
        # Load video using ffmpeg
        all_frames = []
        for f in read_video(input_video_path, skip=input_video_skip, limit=limit):
            all_frames.append(f)
        # Keep only as many frames as we have keypoints for (and vice versa).
        effective_length = min(keypoints.shape[0], len(all_frames))
        all_frames = all_frames[:effective_length]

        keypoints = keypoints[input_video_skip:]  # todo remove
        for idx in range(len(poses)):
            poses[idx] = poses[idx][input_video_skip:]

        if fps is None:
            fps = get_fps(input_video_path)

    if downsample > 1:
        # Average consecutive groups of `downsample` frames and scale fps to match.
        keypoints = downsample_tensor(keypoints, downsample)
        all_frames = downsample_tensor(np.array(all_frames), downsample).astype('uint8')
        for idx in range(len(poses)):
            poses[idx] = downsample_tensor(poses[idx], downsample)
            trajectories[idx] = downsample_tensor(trajectories[idx], downsample)
        fps /= downsample

    # Artists are created lazily on the first animation frame, then updated in place.
    initialized = False
    image = None
    lines = []
    points = None

    if limit < 1:
        limit = len(all_frames)
    else:
        limit = min(limit, len(all_frames))

    parents = skeleton.parents()

    def update_video(i):
        # Draw (first call) or update in place (subsequent calls) all artists for frame i.
        nonlocal initialized, image, lines, points

        # Re-center each 3D view on the root trajectory of its sequence.
        for n, ax in enumerate(ax_3d):
            ax.set_xlim3d([-radius / 2 + trajectories[n][i, 0], radius / 2 + trajectories[n][i, 0]])
            ax.set_ylim3d([-radius / 2 + trajectories[n][i, 1], radius / 2 + trajectories[n][i, 1]])

        # Update 2D poses
        joints_right_2d = keypoints_metadata['keypoints_symmetry'][1]
        joints_left_2d = keypoints_metadata['keypoints_symmetry'][0]
        colors_2d = np.full(keypoints.shape[1], 'peru', dtype="object")
        colors_2d[joints_right_2d] = 'darkseagreen'
        colors_2d[joints_left_2d] = 'slateblue'
        if not initialized:
            image = ax_in.imshow(all_frames[i], aspect='equal')

            for j, j_parent in enumerate(parents):
                if j_parent == -1:
                    # Root joint: no parent bone to draw.
                    continue

                # if len(parents) == keypoints.shape[1] and keypoints_metadata['layout_name'] != 'coco':
                if len(parents) == keypoints.shape[1]:
                    # Draw skeleton only if keypoints match (otherwise we don't have the parents definition)
                    lines.append(ax_in.plot([keypoints[i, j, 0], keypoints[i, j_parent, 0]],
                                            [keypoints[i, j, 1], keypoints[i, j_parent, 1]], color=colors_2d[j]))

                col = 'red' if j in skeleton.joints_right() else 'black'  # NOTE(review): computed but never used
                for n, ax in enumerate(ax_3d):
                    pos = poses[n][i]
                    lines_3d[n].append(ax.plot([pos[j, 0], pos[j_parent, 0]],
                                               [pos[j, 1], pos[j_parent, 1]],
                                               [pos[j, 2], pos[j_parent, 2]], zdir='z', c=colors_2d[j]))

            # points = ax_in.scatter(*keypoints[i].T, 0, zorder=10)

            initialized = True
        else:
            image.set_data(all_frames[i])

            for j, j_parent in enumerate(parents):
                if j_parent == -1:
                    continue

                # if len(parents) == keypoints.shape[1] and keypoints_metadata['layout_name'] != 'coco':
                if len(parents) == keypoints.shape[1]:
                    # Index j - 1: one line artist was appended per non-root joint, in joint order.
                    lines[j - 1][0].set_data([keypoints[i, j, 0], keypoints[i, j_parent, 0]],
                                             [keypoints[i, j, 1], keypoints[i, j_parent, 1]])

                for n, ax in enumerate(ax_3d):
                    pos = poses[n][i]
                    lines_3d[n][j - 1][0].set_xdata([pos[j, 0], pos[j_parent, 0]])
                    lines_3d[n][j - 1][0].set_ydata([pos[j, 1], pos[j_parent, 1]])
                    lines_3d[n][j - 1][0].set_3d_properties([pos[j, 2], pos[j_parent, 2]], zdir='z')

            # points.set_offsets(keypoints[i])

        # Progress indicator, overwritten in place via carriage return.
        print('{}/{} '.format(i, limit), end='\r')

    fig.tight_layout()

    anim = FuncAnimation(fig, update_video, frames=np.arange(0, limit), interval=1000 / fps, repeat=False)
    if output.endswith('.mp4'):
        Writer = writers['ffmpeg']
        writer = Writer(fps=fps, metadata={}, bitrate=bitrate)
        anim.save(output, writer=writer)
    elif output.endswith('.gif'):
        anim.save(output, dpi=80, writer='imagemagick')
    else:
        raise ValueError('Unsupported output format (only .mp4 and .gif are supported)')
    plt.close()
|
|
|
|
|
|
def render_animation_humaneva(keypoints, keypoints_metadata, poses, skeleton, fps, bitrate, azim, output,
                              viewport,
                              limit=-1, downsample=1, size=6, input_video_path=None, input_video_skip=0, viz_action="",
                              viz_subject=""):
    """Variant of render_animation used for HumanEva-style skeletons.

    Like render_animation_T but: *output* is used as-is (the subject/action
    suffixing is commented out), each 3D joint is labelled with its index via
    ax.text, and the 2D line update indexes ``lines`` with a stride of 2
    (see NOTE(review) below).

    Args:
        keypoints: array indexed as [frame, joint, coord] of 2D image coords.
        keypoints_metadata: must contain 'keypoints_symmetry' as
            (left_joint_indices, right_joint_indices).
        poses: dict mapping subplot title -> [frame, joint, xyz] 3D positions.
        skeleton: provides parents() and joints_right().
        fps: output frame rate; if None it is probed from the input video.
        bitrate: bitrate for the ffmpeg writer (mp4 only).
        azim: azimuth of the 3D views.
        output: destination path; must end with '.mp4' or '.gif'.
        viewport: (width, height) of the synthetic black background.
        limit: maximum number of frames to render (-1 / <1 = all available).
        downsample: temporal downsampling factor.
        size: per-panel figure size in inches.
        input_video_path: optional source video decoded with ffmpeg.
        input_video_skip: number of leading video frames to skip.
        viz_action: unused in this variant (suffixing is disabled).
        viz_subject: unused in this variant (suffixing is disabled).

    Raises:
        ValueError: if *output* has an unsupported extension.
    """

    # output = output + "_" + viz_subject + "_" + viz_action + ".mp4"
    # print(output)

    plt.ioff()
    # One column for the input video plus one per 3D pose sequence.
    fig = plt.figure(figsize=(size * (1 + len(poses)), size))
    ax_in = fig.add_subplot(1, 1 + len(poses), 1)
    ax_in.get_xaxis().set_visible(False)
    ax_in.get_yaxis().set_visible(False)
    ax_in.set_axis_off()
    ax_in.set_title('Input')

    ax_3d = []            # one 3D axes per pose sequence
    lines_3d = []         # per-axes list of plotted bone line artists
    lines_3d_anno = []    # NOTE(review): never used anywhere in this function
    trajectories = []     # per-sequence root (joint 0) xy path, used to re-center the view
    radius = 1.7          # half-extent of the 3D viewing cube, in world units
    for index, (title, data) in enumerate(poses.items()):
        ax = fig.add_subplot(1, 1 + len(poses), index + 2, projection='3d')
        ax.view_init(elev=15., azim=azim)
        ax.set_xlim3d([-radius / 2, radius / 2])
        ax.set_zlim3d([0, radius])
        ax.set_ylim3d([-radius / 2, radius / 2])
        # ax.set_aspect('equal')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_zticklabels([])
        ax.dist = 7.5
        ax.set_title(title)  # , pad=35
        ax_3d.append(ax)
        lines_3d.append([])
        trajectories.append(data[:, 0, [0, 1]])
    poses = list(poses.values())

    # Decode video
    if input_video_path is None:
        # Black background
        all_frames = np.zeros((keypoints.shape[0], viewport[1], viewport[0]), dtype='uint8')
    else:
        # Load video using ffmpeg
        all_frames = []
        for f in read_video(input_video_path, skip=input_video_skip, limit=limit):
            all_frames.append(f)
        # Keep only as many frames as we have keypoints for (and vice versa).
        effective_length = min(keypoints.shape[0], len(all_frames))
        all_frames = all_frames[:effective_length]

        keypoints = keypoints[input_video_skip:]  # todo remove
        for idx in range(len(poses)):
            poses[idx] = poses[idx][input_video_skip:]

        if fps is None:
            fps = get_fps(input_video_path)

    if downsample > 1:
        # Average consecutive groups of `downsample` frames and scale fps to match.
        keypoints = downsample_tensor(keypoints, downsample)
        all_frames = downsample_tensor(np.array(all_frames), downsample).astype('uint8')
        for idx in range(len(poses)):
            poses[idx] = downsample_tensor(poses[idx], downsample)
            trajectories[idx] = downsample_tensor(trajectories[idx], downsample)
        fps /= downsample

    # Artists are created lazily on the first animation frame, then updated in place.
    initialized = False
    image = None
    lines = []
    points = None

    if limit < 1:
        limit = len(all_frames)
    else:
        limit = min(limit, len(all_frames))

    parents = skeleton.parents()

    def update_video(i):
        # Draw (first call) or update in place (subsequent calls) all artists for frame i.
        nonlocal initialized, image, lines, points

        # Re-center each 3D view on the root trajectory of its sequence.
        for n, ax in enumerate(ax_3d):
            ax.set_xlim3d([-radius / 2 + trajectories[n][i, 0], radius / 2 + trajectories[n][i, 0]])
            ax.set_ylim3d([-radius / 2 + trajectories[n][i, 1], radius / 2 + trajectories[n][i, 1]])

        # Update 2D poses
        joints_right_2d = keypoints_metadata['keypoints_symmetry'][1]
        joints_left_2d = keypoints_metadata['keypoints_symmetry'][0]
        colors_2d = np.full(keypoints.shape[1], 'peru', dtype="object")
        colors_2d[joints_right_2d] = 'darkseagreen'
        colors_2d[joints_left_2d] = 'slateblue'
        if not initialized:
            image = ax_in.imshow(all_frames[i], aspect='equal')

            for j, j_parent in enumerate(parents):
                if j_parent == -1:
                    # Root joint: no parent bone to draw.
                    continue

                # if len(parents) == keypoints.shape[1] and keypoints_metadata['layout_name'] != 'coco':
                if len(parents) == keypoints.shape[1]:
                    # Draw skeleton only if keypoints match (otherwise we don't have the parents definition)
                    lines.append(ax_in.plot([keypoints[i, j, 0], keypoints[i, j_parent, 0]],
                                            [keypoints[i, j, 1], keypoints[i, j_parent, 1]], color=colors_2d[j]))

                col = 'red' if j in skeleton.joints_right() else 'black'  # NOTE(review): computed but never used
                for n, ax in enumerate(ax_3d):
                    pos = poses[n][i]
                    lines_3d[n].append(ax.plot([pos[j, 0], pos[j_parent, 0]],
                                               [pos[j, 1], pos[j_parent, 1]],
                                               [pos[j, 2], pos[j_parent, 2]], zdir='z', c=colors_2d[j]))
                    # Annotate each 3D joint with its index (debug aid).
                    ax.text(pos[j, 0] - 0.1, pos[j, 1] - 0.1, pos[j, 2] - 0.1, j)

            # points = ax_in.scatter(*keypoints[i].T, 0, zorder=10)

            initialized = True
        else:
            image.set_data(all_frames[i])

            for j, j_parent in enumerate(parents):
                if j_parent == -1:
                    continue

                # if len(parents) == keypoints.shape[1] and keypoints_metadata['layout_name'] != 'coco':
                if len(parents) == keypoints.shape[1]:
                    # NOTE(review): the init branch appends exactly ONE 2D line per
                    # non-root joint, so the (j - 1) * 2 stride looks inconsistent
                    # with that and would index past the list for higher joints --
                    # verify against the skeleton this variant is actually used with.
                    lines[(j - 1) * 2][0].set_data([keypoints[i, j, 0], keypoints[i, j_parent, 0]],
                                                   [keypoints[i, j, 1], keypoints[i, j_parent, 1]])

                for n, ax in enumerate(ax_3d):
                    pos = poses[n][i]
                    lines_3d[n][j - 1][0].set_xdata([pos[j, 0], pos[j_parent, 0]])
                    lines_3d[n][j - 1][0].set_ydata([pos[j, 1], pos[j_parent, 1]])
                    lines_3d[n][j - 1][0].set_3d_properties([pos[j, 2], pos[j_parent, 2]], zdir='z')

            # points.set_offsets(keypoints[i])

        # Progress indicator, overwritten in place via carriage return.
        print('{}/{} '.format(i, limit), end='\r')

    fig.tight_layout()

    anim = FuncAnimation(fig, update_video, frames=np.arange(0, limit), interval=1000 / fps, repeat=False)
    if output.endswith('.mp4'):
        Writer = writers['ffmpeg']
        writer = Writer(fps=fps, metadata={}, bitrate=bitrate)
        anim.save(output, writer=writer)
    elif output.endswith('.gif'):
        anim.save(output, dpi=80, writer='imagemagick')
    else:
        raise ValueError('Unsupported output format (only .mp4 and .gif are supported)')
    plt.close()
|
|
|