
3D Pose Study Notes (2025)


Contents

champ

nlf 3dpose 2025: 55 keypoints

Inference code

Environment variable to set

Dependency: metrabs

Rendering code

tram4d: foot placement is not great either

GVHMR: the feet don't line up

Inference code

multiperson (2023)

GenHMR is not open-sourced yet


champ

https://zhuanlan.zhihu.com/p/700326554

nlf 3dpose 2025: 55 keypoints

https://github.com/isarandi/nlf

Inference code:

Environment variable to set:

    DATA_ROOT = os.environ['DATA_ROOT']
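
If DATA_ROOT is unset, this lookup raises a KeyError, so set the variable before the nlf/metrabs modules are imported. A minimal sketch (the path is a placeholder for wherever the downloaded model and data files live, not a real location):

import os

# Set DATA_ROOT before any nlf/metrabs import reads it;
# '/path/to/nlf_data' is a placeholder path.
os.environ.setdefault('DATA_ROOT', '/path/to/nlf_data')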

Dependency: metrabs

See the blog post "cameralib installation process" for setup.

GitHub - isarandi/metrabs: Calculate precise 3D human poses in an absolute manner based on RGB images.

https://github.com/usmannamjad/3D-HMR/blob/main/scripts/nlf_pred.py

Inference code:

# coding=utf-8
import sys
import os

current_dir = os.path.dirname(os.path.abspath(__file__))
os.chdir(current_dir)

# Make the repo root and src/ importable
paths = [os.path.abspath(__file__).split('scripts')[0]]
print('current_dir', current_dir)
paths.append(os.path.abspath(os.path.join(current_dir, 'src')))

for path in paths:
    sys.path.insert(0, path)
    os.environ['PYTHONPATH'] = (os.environ.get('PYTHONPATH', '') + ':' + path).strip(':')

import glob
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
import torch
import torchvision
import pickle
import gc
import time
import warnings
warnings.filterwarnings("ignore")

# Load the TorchScript model
model = torch.jit.load('./nlf_l_multi.torchscript').cuda().eval()
batch_size = 1
key_names = ['boxes', 'pose', 'betas', 'trans', 'vertices3d', 'joints3d',
             'vertices2d', 'joints2d', 'vertices3d_nonparam', 'joints3d_nonparam',
             'vertices2d_nonparam', 'joints2d_nonparam',
             'vertex_uncertainties', 'joint_uncertainties']

paths = glob.glob(r"/shared_disk/users/lbg/project/inpaint/see3d/mp4s/person_big_0108.mp4")

out_dir = 'output/'
os.makedirs(out_dir, exist_ok=True)

videos_not_processed = []
for vi, video_path in enumerate(paths):
    try:
        frames, _, _ = torchvision.io.read_video(video_path, pts_unit="sec")

        # Convert frames to a GPU tensor, shape (num_frames, C, H, W)
        frames = frames.permute(0, 3, 1, 2).cuda()

        num_frames = frames.shape[0]
        print("frames shape: ", frames.shape, num_frames)

        # Run the model batch by batch and collect per-frame outputs
        results = {key: [] for key in key_names}
        with torch.no_grad():
            for i in range(0, num_frames, batch_size):
                frame_batch = frames[i:i + batch_size]
                print("i: ", i, "i end: ", i + batch_size, frame_batch.shape)
                start = time.time()
                preds = model.detect_smpl_batched(frame_batch, model_name='smplx')
                print(i, 'time', time.time() - start)
                for key in preds:
                    results[key].extend([p.cpu() for p in preds[key]])

        del frames
        torch.cuda.empty_cache()
        gc.collect()
        print("output: ", len(results['pose']))
        print("output: ", len(results['betas']))

        # Save results under the video's base name
        video_name = str(video_path).replace('.mp4', '').split('/')[-1]
        save_path = out_dir + f"{video_name}.pkl"
        with open(save_path, 'wb') as f:
            pickle.dump(results, f)
        time.sleep(1)

        # Sanity check: read the pickle back
        print("reading pickle file")
        with open(save_path, 'rb') as f:
            results = pickle.load(f)
        print("results: ", len(results['pose']))
        print("results: ", len(results['betas']))
    except Exception as e:
        videos_not_processed.append(str(video_path))
        print(f"Error processing video: {video_path}")
        print(e)

print("Videos not processed:", len(videos_not_processed))
output_file = out_dir + "videos_not_processed.txt"
with open(output_file, 'w') as f:
    f.write("\n".join(videos_not_processed))
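
The pickle written above holds one list entry per frame under each of the key_names. A quick sanity-check sketch for inspecting it (the filename follows the out_dir plus video-name convention used by the script; the commented shapes are assumptions to verify, not guarantees):

import pickle

# Load the saved predictions and peek at the per-frame structure
with open('output/person_big_0108.pkl', 'rb') as f:
    results = pickle.load(f)

print(sorted(results.keys()))
pose0 = results['pose'][0]            # frame 0: one row per detected person
print('frame 0 pose:', pose0.shape)   # expected (num_people, 165) for SMPL-X
print('frame 0 joints3d:', results['joints3d'][0].shape)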

Rendering code:

https://github.com/usmannamjad/3D-HMR/blob/main/scripts/render_world_space_motion.py

# coding=utf-8
import sys
import os

import imageio

current_dir = os.path.dirname(os.path.abspath(__file__))
os.chdir(current_dir)

paths = [os.path.abspath(__file__).split('scripts')[0]]
print('current_dir', current_dir)
paths.append(os.path.abspath(os.path.join(current_dir, 'src')))

for path in paths:
    sys.path.insert(0, path)
    os.environ['PYTHONPATH'] = (os.environ.get('PYTHONPATH', '') + ':' + path).strip(':')

import gc
import pickle
import numpy as np
import torch
import cv2
import pyrender
from smplx import SMPLX
from tqdm import tqdm
import trimesh

# Render headlessly via EGL
os.environ["PYOPENGL_PLATFORM"] = "egl"
os.environ['EGL_DEVICE_ID'] = '0'

video_path = r"/shared_disk/users/lbg/project/inpaint/see3d/mp4s/person_big_0108.mp4"
pred_pkl = "/shared_disk/users/lbg/project/human_4d/nlf_pose/output/person_big_0108.pkl"

with open(pred_pkl, 'rb') as f:
    pred = pickle.load(f)

pose = pred['pose']

# Use the first tensor's shape as the reference; zero out frames whose
# shape differs (e.g. frames where detection failed)
target_shape = pose[0].shape
inconsistent_frames = []
for i, tensor in enumerate(pose):
    if tensor.shape != target_shape:
        inconsistent_frames.append(i)
        pose[i] = torch.zeros(1, 165)
        print(f"Frame {i} has inconsistent shape: {tensor.shape} (expected {target_shape})")

pose = torch.cat(pose, dim=0)  # (num_frames, 165) SMPL-X pose vectors
motion = pose[:-1]

# Load the SMPL-X body model
model_path = './SMPLX_NEUTRAL.npz'
model = SMPLX(model_path, gender='neutral', use_pca=False)

num_frames = pose.shape[0]  # note: len(pred) would count dict keys, not frames
os.makedirs("out", exist_ok=True)
renderer = pyrender.OffscreenRenderer(viewport_width=1080, viewport_height=1920)

frames = []
for i in tqdm(range(num_frames)):
    # Split the 165-dim pose vector: first 3 values are the global
    # orientation, the next 63 are the 21 body joints
    root_orient = pose[i][:3].unsqueeze(0)
    pose_body = pose[i][3:22 * 3].unsqueeze(0)
    trans = pred['trans'][i]
    betas = pred['betas'][i]

    # Run SMPL-X forward to get the posed mesh
    output = model(global_orient=root_orient, body_pose=pose_body,
                   betas=betas, transl=trans)
    vertices = output.vertices.detach().cpu().numpy().squeeze()
    faces = model.faces

    mesh = trimesh.Trimesh(vertices=vertices, faces=faces)

    # Build a pyrender scene with the mesh, a camera and a light
    scene = pyrender.Scene()
    mesh = pyrender.Mesh.from_trimesh(mesh)
    scene.add(mesh)

    camera = pyrender.PerspectiveCamera(yfov=np.pi / 3.0, aspectRatio=1.0)
    camera_pose = np.eye(4)
    camera_pose[:3, 3] = np.array([0, 0, 3])  # move the camera further away

    scene.add(camera, pose=camera_pose)
    light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=2.0)
    scene.add(light, pose=camera_pose)

    # Render the scene
    color, depth = renderer.render(scene)

    cv2.imwrite(f"out/{i}.jpg", cv2.cvtColor(color, cv2.COLOR_RGB2BGR))
    frames.append(color)
    scene.clear()  # clear the scene to free resources
    gc.collect()   # force garbage collection

# Save as video
imageio.mimsave('output.mp4', frames, fps=25, macro_block_size=None)
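
Note that this script renders with a fixed camera (3 m back along +Z) rather than the camera NLF estimated, so the mesh will not line up with the source frames. For a quick alignment check it is easier to draw the predicted 2D joints straight onto the video. A sketch, assuming each joints2d entry from the pickle is a (num_people, num_joints, 2) tensor in pixel coordinates:

import os
import pickle

import cv2
import torchvision

with open('output/person_big_0108.pkl', 'rb') as f:
    results = pickle.load(f)

frames, _, _ = torchvision.io.read_video(
    '/shared_disk/users/lbg/project/inpaint/see3d/mp4s/person_big_0108.mp4',
    pts_unit='sec')  # (T, H, W, C) uint8 RGB

os.makedirs('out', exist_ok=True)
for i, joints in enumerate(results['joints2d']):
    img = cv2.cvtColor(frames[i].numpy(), cv2.COLOR_RGB2BGR)
    for person in joints:             # one row per detected person
        for x, y in person.tolist():  # assumed pixel coordinates
            cv2.circle(img, (int(x), int(y)), 3, (0, 255, 0), -1)
    cv2.imwrite(f'out/joints2d_{i}.jpg', img)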

tram4d: foot placement is not great either

https://github.com/yufu-wang/tram

GVHMR: the feet don't line up

https://github.com/zju3dv/GVHMR

Inference code:

# coding=utf-8
import sys
import os

current_dir = os.path.dirname(os.path.abspath(__file__))
os.chdir(current_dir)

paths = [os.path.abspath(__file__).split('scripts')[0]]
print('current_dir', current_dir)
paths.append(os.path.abspath(os.path.join(current_dir, '../')))

for path in paths:
    sys.path.insert(0, path)
    os.environ['PYTHONPATH'] = (os.environ.get('PYTHONPATH', '') + ':' + path).strip(':')

import cv2
import torch
import pytorch_lightning as pl
import numpy as np
import argparse
from hmr4d.utils.pylogger import Log
import hydra
from hydra import initialize_config_module, compose
from pathlib import Path
from pytorch3d.transforms import quaternion_to_matrix

from hmr4d.configs import register_store_gvhmr
from hmr4d.utils.video_io_utils import (
    get_video_lwh,
    read_video_np,
    save_video,
    merge_videos_horizontal,
    get_writer,
    get_video_reader,
)
from hmr4d.utils.vis.cv2_utils import draw_bbx_xyxy_on_image_batch, draw_coco17_skeleton_batch

from hmr4d.utils.preproc import Tracker, Extractor, VitPoseExtractor, SLAMModel

from hmr4d.utils.geo.hmr_cam import get_bbx_xys_from_xyxy, estimate_K, convert_K_to_K4, create_camera_sensor
from hmr4d.utils.geo_transform import compute_cam_angvel
from hmr4d.model.gvhmr.gvhmr_pl_demo import DemoPL
from hmr4d.utils.net_utils import detach_to_cpu, to_cuda
from hmr4d.utils.smplx_utils import make_smplx
from hmr4d.utils.vis.renderer import Renderer, get_global_cameras_static, get_ground_params_from_points
from tqdm import tqdm
from hmr4d.utils.geo_transform import apply_T_on_points, compute_T_ayfz2ay
from einops import einsum, rearrange


CRF = 23  # 17 is visually lossless; every +6 roughly halves the mp4 size


def parse_args_to_cfg():
    # Put all args into cfg
    parser = argparse.ArgumentParser()
    parser.add_argument("--video", type=str, default=r"/shared_disk/users/lbg/project/inpaint/see3d/mp4s/person_big_0108.mp4")
    parser.add_argument("--output_root", type=str, default='output', help="by default outputs/demo")
    parser.add_argument("-s", "--static_cam", default=True, action="store_true", help="If true, skip DPVO")
    parser.add_argument("--verbose", action="store_true", help="If true, draw intermediate results")
    args = parser.parse_args()

    # Input
    video_path = Path(args.video)
    assert video_path.exists(), f"Video not found at {video_path}"
    length, width, height = get_video_lwh(video_path)
    Log.info(f"[Input]: {video_path}")
    Log.info(f"(L, W, H) = ({length}, {width}, {height})")

    # Cfg
    with initialize_config_module(version_base="1.3", config_module="hmr4d.configs"):
        overrides = [
            f"video_name={video_path.stem}",
            f"static_cam={args.static_cam}",
            f"verbose={args.verbose}",
        ]
        # Allow changing the output root
        if args.output_root is not None:
            overrides.append(f"output_root={args.output_root}")
        register_store_gvhmr()
        cfg = compose(config_name="demo", overrides=overrides)

    # Output
    Log.info(f"[Output Dir]: {cfg.output_dir}")
    Path(cfg.output_dir).mkdir(parents=True, exist_ok=True)
    Path(cfg.preprocess_dir).mkdir(parents=True, exist_ok=True)

    # Copy the raw input video to cfg.video_path
    Log.info(f"[Copy Video] {video_path} -> {cfg.video_path}")
    if not Path(cfg.video_path).exists() or get_video_lwh(video_path)[0] != get_video_lwh(cfg.video_path)[0]:
        reader = get_video_reader(video_path)
        writer = get_writer(cfg.video_path, fps=30, crf=CRF)
        for img in tqdm(reader, total=get_video_lwh(video_path)[0], desc="Copy"):
            writer.write_frame(img)
        writer.close()
        reader.close()

    return cfg


@torch.no_grad()
def run_preprocess(cfg):
    Log.info("[Preprocess] Start!")
    tic = Log.time()
    video_path = cfg.video_path
    paths = cfg.paths
    static_cam = cfg.static_cam
    verbose = cfg.verbose

    # Get bbx tracking result
    if not Path(paths.bbx).exists():
        tracker = Tracker()
        bbx_xyxy = tracker.get_one_track(video_path).float()  # (L, 4)
        bbx_xys = get_bbx_xys_from_xyxy(bbx_xyxy, base_enlarge=1.2).float()  # (L, 3) apply aspect ratio and enlarge
        torch.save({"bbx_xyxy": bbx_xyxy, "bbx_xys": bbx_xys}, paths.bbx)
        del tracker
    else:
        bbx_xys = torch.load(paths.bbx)["bbx_xys"]
        Log.info(f"[Preprocess] bbx (xyxy, xys) from {paths.bbx}")
    if verbose:
        video = read_video_np(video_path)
        bbx_xyxy = torch.load(paths.bbx)["bbx_xyxy"]
        video_overlay = draw_bbx_xyxy_on_image_batch(bbx_xyxy, video)
        save_video(video_overlay, cfg.paths.bbx_xyxy_video_overlay)

    # Get ViTPose keypoints
    if not Path(paths.vitpose).exists():
        vitpose_extractor = VitPoseExtractor()
        vitpose = vitpose_extractor.extract(video_path, bbx_xys)
        torch.save(vitpose, paths.vitpose)
        del vitpose_extractor
    else:
        vitpose = torch.load(paths.vitpose)
        Log.info(f"[Preprocess] vitpose from {paths.vitpose}")
    if verbose:
        video = read_video_np(video_path)
        video_overlay = draw_coco17_skeleton_batch(video, vitpose, 0.5)
        save_video(video_overlay, paths.vitpose_video_overlay)

    # Get ViT features
    if not Path(paths.vit_features).exists():
        extractor = Extractor()
        vit_features = extractor.extract_video_features(video_path, bbx_xys)
        torch.save(vit_features, paths.vit_features)
        del extractor
    else:
        Log.info(f"[Preprocess] vit_features from {paths.vit_features}")

    # Get DPVO results
    if not static_cam:  # use SLAM to get camera rotation
        if not Path(paths.slam).exists():
            length, width, height = get_video_lwh(cfg.video_path)
            K_fullimg = estimate_K(width, height)
            intrinsics = convert_K_to_K4(K_fullimg)
            slam = SLAMModel(video_path, width, height, intrinsics, buffer=4000, resize=0.5)
            bar = tqdm(total=length, desc="DPVO")
            while True:
                ret = slam.track()
                if ret:
                    bar.update()
                else:
                    break
            slam_results = slam.process()  # (L, 7), numpy
            torch.save(slam_results, paths.slam)
        else:
            Log.info(f"[Preprocess] slam results from {paths.slam}")

    Log.info(f"[Preprocess] End. Time elapsed: {Log.time()-tic:.2f}s")


def load_data_dict(cfg):
    paths = cfg.paths
    length, width, height = get_video_lwh(cfg.video_path)
    if cfg.static_cam:
        R_w2c = torch.eye(3).repeat(length, 1, 1)
    else:
        traj = torch.load(cfg.paths.slam)
        traj_quat = torch.from_numpy(traj[:, [6, 3, 4, 5]])
        R_w2c = quaternion_to_matrix(traj_quat).mT
    K_fullimg = estimate_K(width, height).repeat(length, 1, 1)
    # K_fullimg = create_camera_sensor(width, height, 26)[2].repeat(length, 1, 1)

    data = {
        "length": torch.tensor(length),
        "bbx_xys": torch.load(paths.bbx)["bbx_xys"],
        "kp2d": torch.load(paths.vitpose),
        "K_fullimg": K_fullimg,
        "cam_angvel": compute_cam_angvel(R_w2c),
        "f_imgseq": torch.load(paths.vit_features),
    }
    return data


def render_incam(cfg):
    incam_video_path = Path(cfg.paths.incam_video)
    if incam_video_path.exists():
        Log.info(f"[Render Incam] Video already exists at {incam_video_path}")
        return

    pred = torch.load(cfg.paths.hmr4d_results)
    smplx = make_smplx("supermotion").cuda()
    smplx2smpl = torch.load("hmr4d/utils/body_model/smplx2smpl_sparse.pt").cuda()
    faces_smpl = make_smplx("smpl").faces

    # SMPL-X forward, then map vertices to the SMPL topology
    smplx_out = smplx(**to_cuda(pred["smpl_params_incam"]))
    pred_c_verts = torch.stack([torch.matmul(smplx2smpl, v_) for v_ in smplx_out.vertices])

    # -- rendering code -- #
    video_path = cfg.video_path
    length, width, height = get_video_lwh(video_path)
    K = pred["K_fullimg"][0]

    # renderer
    renderer = Renderer(width, height, device="cuda", faces=faces_smpl, K=K)
    reader = get_video_reader(video_path)  # (F, H, W, 3), uint8, numpy
    bbx_xys_render = torch.load(cfg.paths.bbx)["bbx_xys"]

    # -- render mesh -- #
    verts_incam = pred_c_verts
    writer = get_writer(incam_video_path, fps=30, crf=CRF)
    for i, img_raw in tqdm(enumerate(reader), total=get_video_lwh(video_path)[0], desc="Rendering Incam"):
        img = renderer.render_mesh(verts_incam[i].cuda(), img_raw, [0.8, 0.8, 0.8])

        # # bbx
        # bbx_xys_ = bbx_xys_render[i].cpu().numpy()
        # lu_point = (bbx_xys_[:2] - bbx_xys_[2:] / 2).astype(int)
        # rd_point = (bbx_xys_[:2] + bbx_xys_[2:] / 2).astype(int)
        # img = cv2.rectangle(img, lu_point, rd_point, (255, 178, 102), 2)

        writer.write_frame(img)
    writer.close()
    reader.close()


def render_global(cfg):
    global_video_path = Path(cfg.paths.global_video)
    if global_video_path.exists():
        Log.info(f"[Render Global] Video already exists at {global_video_path}")
        return

    debug_cam = False
    pred = torch.load(cfg.paths.hmr4d_results)
    smplx = make_smplx("supermotion").cuda()
    smplx2smpl = torch.load("hmr4d/utils/body_model/smplx2smpl_sparse.pt").cuda()
    faces_smpl = make_smplx("smpl").faces
    J_regressor = torch.load("hmr4d/utils/body_model/smpl_neutral_J_regressor.pt").cuda()

    # smpl
    smplx_out = smplx(**to_cuda(pred["smpl_params_global"]))
    pred_ay_verts = torch.stack([torch.matmul(smplx2smpl, v_) for v_ in smplx_out.vertices])

    def move_to_start_point_face_z(verts):
        "XZ to origin, start from the ground, face +Z"
        # position
        verts = verts.clone()  # (L, V, 3)
        offset = einsum(J_regressor, verts[0], "j v, v i -> j i")[0]  # (3)
        offset[1] = verts[:, :, [1]].min()
        verts = verts - offset
        # face direction
        T_ay2ayfz = compute_T_ayfz2ay(einsum(J_regressor, verts[[0]], "j v, l v i -> l j i"), inverse=True)
        verts = apply_T_on_points(verts, T_ay2ayfz)
        return verts

    verts_glob = move_to_start_point_face_z(pred_ay_verts)
    joints_glob = einsum(J_regressor, verts_glob, "j v, l v i -> l j i")  # (L, J, 3)
    global_R, global_T, global_lights = get_global_cameras_static(
        verts_glob.cpu(),
        beta=2.0,
        cam_height_degree=20,
        target_center_height=1.0,
    )

    # -- rendering code -- #
    video_path = cfg.video_path
    length, width, height = get_video_lwh(video_path)
    _, _, K = create_camera_sensor(width, height, 24)  # render with a 24mm lens

    # renderer
    renderer = Renderer(width, height, device="cuda", faces=faces_smpl, K=K)
    # renderer = Renderer(width, height, device="cuda", faces=faces_smpl, K=K, bin_size=0)

    # -- render mesh -- #
    scale, cx, cz = get_ground_params_from_points(joints_glob[:, 0], verts_glob)
    renderer.set_ground(scale * 1.5, cx, cz)
    color = torch.ones(3).float().cuda() * 0.8

    render_length = length if not debug_cam else 8
    writer = get_writer(global_video_path, fps=30, crf=CRF)
    for i in tqdm(range(render_length), desc="Rendering Global"):
        cameras = renderer.create_camera(global_R[i], global_T[i])
        img = renderer.render_with_ground(verts_glob[[i]], color[None], cameras, global_lights)
        writer.write_frame(img)
    writer.close()


if __name__ == "__main__":
    cfg = parse_args_to_cfg()
    paths = cfg.paths
    Log.info(f"[GPU]: {torch.cuda.get_device_name()}")
    Log.info(f'[GPU]: {torch.cuda.get_device_properties("cuda")}')

    # ===== Preprocess and save to disk ===== #
    run_preprocess(cfg)
    data = load_data_dict(cfg)

    # ===== HMR4D ===== #
    if not Path(paths.hmr4d_results).exists():
        Log.info("[HMR4D] Predicting")
        model: DemoPL = hydra.utils.instantiate(cfg.model, _recursive_=False)
        model.load_pretrained_model(cfg.ckpt_path)
        model = model.eval().cuda()
        tic = Log.sync_time()
        pred = model.predict(data, static_cam=cfg.static_cam)
        pred = detach_to_cpu(pred)
        data_time = data["length"] / 30
        Log.info(f"[HMR4D] Elapsed: {Log.sync_time() - tic:.2f}s for data-length={data_time:.1f}s")
        torch.save(pred, paths.hmr4d_results)

    # ===== Render ===== #
    render_incam(cfg)
    render_global(cfg)
    if not Path(paths.incam_global_horiz_video).exists():
        Log.info("[Merge Videos]")
        merge_videos_horizontal([paths.incam_video, paths.global_video], paths.incam_global_horiz_video)
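
Each stage above caches its output to disk, so downstream experiments can load the saved predictions directly instead of re-running the pipeline. A minimal sketch (the path is hypothetical and depends on cfg.output_dir; the keys are the ones consumed by the two render functions above):

import torch

# Load the cached GVHMR predictions; the actual path is whatever
# cfg.paths.hmr4d_results resolved to for your run.
pred = torch.load('output/person_big_0108/hmr4d_results.pt')
print(pred.keys())  # expect smpl_params_incam, smpl_params_global, K_fullimg, ...
for k, v in pred['smpl_params_global'].items():
    print(k, tuple(v.shape))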

multiperson (2023)

https://github.com/JunukCha/MultiPerson

GenHMR is not open-sourced yet:

https://m-usamasaleem.github.io/publication/GenHMR/GenHMR.html
