

@NicolasGoeddel
Last active November 7, 2023 15:53
Installation script for facebookresearch / pifuhd
#!/bin/bash
# Extract the Python 3 minor version (e.g. "8" for Python 3.8.x).
minorVersion="$(python3 -V 2>&1 | grep -Po '(?<=Python 3\.)([0-9]+)')"
if (( "$minorVersion" <= 5 )); then
	echo "Your Python version is too old. You need at least Python 3.6.x." >&2
	echo "Try the following:"
	echo "  sudo add-apt-repository ppa:deadsnakes/ppa"
	echo "  sudo apt update"
	echo "  sudo apt upgrade"
	echo "  sudo apt install python3.8 python3.8-venv python3.8-dev python3.8-gdbm"
	echo "Then make sure that Python 3.8 is the default interpreter, either by configuring update-alternatives or by using an alias in your .bashrc."
	echo "You can find more information here: https://medium.com/analytics-vidhya/installing-python-3-8-3-66701d3db134"
	exit 1
fi
installPath="$(pwd)"
if [ -n "$1" ]; then
	mkdir -p "$1" || exit 1
	installPath="$(realpath "$1")"
fi
extraPackages=(
	"liboctomap-dev"
	"libfcl-dev"
	"libspatialindex-dev"
	"libgl1-mesa-glx"
)
installPackages=()
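# dpkg-query prints a status like "install ok installed" for present packages;
# the loop below queues anything whose status is not exactly that.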
for package in "${extraPackages[@]}"; do
	if ! dpkg-query -W -f='${Status}\n' "$package" 2>/dev/null | grep -q "^install ok installed$"; then
		installPackages+=("$package")
	fi
done
if (( "${#installPackages[@]}" > 0 )); then
	echo "The following additional packages are needed for trimesh[all], rtree and OpenCV: ${installPackages[*]}"
	sudo apt install "${installPackages[@]}" || exit 1
fi
versionError=false
fclVersion="$(dpkg-query -W -f='${Version}\n' libfcl-dev)"
if ! [[ "$fclVersion" =~ ^0\.5.* ]]; then
	echo "libfcl-dev is only available in version $fclVersion, but we need version 0.5.x." >&2
	versionError=true
fi
octoVersion="$(dpkg-query -W -f='${Version}\n' liboctomap-dev)"
if ! [[ "$octoVersion" =~ ^1\.[89].* ]]; then
	echo "liboctomap-dev is only available in version $octoVersion, but we need version 1.8 or 1.9." >&2
	versionError=true
fi
if $versionError; then
	echo "You can install the following packages manually and restart the script:"
	echo "  http://mirrors.kernel.org/ubuntu/pool/universe/f/fcl/libfcl0.5_0.5.0-5_amd64.deb"
	echo "  http://mirrors.kernel.org/ubuntu/pool/universe/f/fcl/libfcl-dev_0.5.0-5_amd64.deb"
	echo "  http://mirrors.kernel.org/ubuntu/pool/universe/o/octomap/liboctomap1.8_1.8.1+dfsg-1_amd64.deb"
	echo "  http://mirrors.kernel.org/ubuntu/pool/universe/o/octomap/liboctomap-dev_1.8.1+dfsg-1_amd64.deb"
	echo ""
	echo "Example:"
	echo "  wget http://mirrors.kernel.org/ubuntu/pool/universe/f/fcl/libfcl0.5_0.5.0-5_amd64.deb"
	echo "  sudo dpkg -i libfcl0.5_0.5.0-5_amd64.deb"
	exit 1
fi
cd "$installPath"
if ! [ -d "pifuhd" ]; then
git clone https://github.com/facebookresearch/pifuhd.git || exit 1
fi
cd pifuhd
if ! [ -f "checkpoints/pifuhd.pt" ]; then
sh ./scripts/download_trained_model.sh
fi
if ! [ -d ".venv" ]; then
python3 -m venv .venv
fi
source .venv/bin/activate
pip install --upgrade pip || exit 1
pip install numpy || exit 1
pip install torch==1.4.0+cpu torchvision==0.5.0+cpu -f https://download.pytorch.org/whl/torch_stable.html || exit 1
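# Note: the line above pins CPU-only wheels, and the recon.py patch further below
# also forces CPU inference. With a CUDA 10.x GPU you could instead try the CUDA
# builds (an assumption, untested with this script) and skip that patch:
#   pip install torch==1.4.0 torchvision==0.5.0 -f https://download.pytorch.org/whl/torch_stable.html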
pip install wheel || exit 1
pip install opencv-python tqdm matplotlib scikit-image pyopengl trimesh[all] pycocotools || exit 1
mkdir -p content
cd content || exit 1
if ! [ -d "lightweight-human-pose-estimation.pytorch" ]; then
	git clone https://github.com/Daniil-Osokin/lightweight-human-pose-estimation.pytorch.git || exit 1
fi
cd lightweight-human-pose-estimation.pytorch || exit 1
wget -nc https://download.01.org/opencv/openvino_training_extensions/models/human_pose_estimation/checkpoint_iter_370000.pth
cat > "getrect.py" << EOM
import torch
import cv2
import numpy as np
from models.with_mobilenet import PoseEstimationWithMobileNet
from modules.keypoints import extract_keypoints, group_keypoints
from modules.load_state import load_state
from modules.pose import Pose, track_poses
import demo
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--image_path', type=str, default=None)
args = parser.parse_args()
def get_rect(net, images, height_size, cpu=False):
net = net.eval()
stride = 8
upsample_ratio = 4
num_keypoints = Pose.num_kpts
previous_poses = []
delay = 33
for image in images:
rect_path = image.replace('.%s' % (image.split('.')[-1]), '_rect.txt')
img = cv2.imread(image, cv2.IMREAD_COLOR)
orig_img = img.copy()
orig_img = img.copy()
heatmaps, pafs, scale, pad = demo.infer_fast(net, img, height_size, stride, upsample_ratio, cpu=cpu)
total_keypoints_num = 0
all_keypoints_by_type = []
for kpt_idx in range(num_keypoints): # 19th for bg
total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx], all_keypoints_by_type, total_keypoints_num)
pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type, pafs, demo=True)
for kpt_id in range(all_keypoints.shape[0]):
all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride / upsample_ratio - pad[1]) / scale
all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride / upsample_ratio - pad[0]) / scale
current_poses = []
rects = []
for n in range(len(pose_entries)):
if len(pose_entries[n]) == 0:
continue
pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
valid_keypoints = []
for kpt_id in range(num_keypoints):
if pose_entries[n][kpt_id] != -1.0: # keypoint was found
pose_keypoints[kpt_id, 0] = int(all_keypoints[int(pose_entries[n][kpt_id]), 0])
pose_keypoints[kpt_id, 1] = int(all_keypoints[int(pose_entries[n][kpt_id]), 1])
valid_keypoints.append([pose_keypoints[kpt_id, 0], pose_keypoints[kpt_id, 1]])
valid_keypoints = np.array(valid_keypoints)
if pose_entries[n][10] != -1.0 or pose_entries[n][13] != -1.0:
pmin = valid_keypoints.min(0)
pmax = valid_keypoints.max(0)
center = (0.5 * (pmax[:2] + pmin[:2])).astype(np.int)
radius = int(0.65 * max(pmax[0]-pmin[0], pmax[1]-pmin[1]))
elif pose_entries[n][10] == -1.0 and pose_entries[n][13] == -1.0 and pose_entries[n][8] != -1.0 and pose_entries[n][11] != -1.0:
# if leg is missing, use pelvis to get cropping
center = (0.5 * (pose_keypoints[8] + pose_keypoints[11])).astype(np.int)
radius = int(1.45*np.sqrt(((center[None,:] - valid_keypoints)**2).sum(1)).max(0))
center[1] += int(0.05*radius)
else:
center = np.array([img.shape[1]//2,img.shape[0]//2])
radius = max(img.shape[1]//2,img.shape[0]//2)
x1 = center[0] - radius
y1 = center[1] - radius
rects.append([x1, y1, 2*radius, 2*radius])
np.savetxt(rect_path, np.array(rects), fmt='%d')
net = PoseEstimationWithMobileNet()
checkpoint = torch.load('checkpoint_iter_370000.pth', map_location=torch.device('cpu'))
load_state(net, checkpoint)
get_rect(net.cpu(), [args.image_path], 512, cpu = True)
EOM
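# getrect.py follows the get_rect() helper from the official PIFuHD Colab demo:
# for each input image it writes a square crop rectangle to <image>_rect.txt
# next to the image. Hypothetical manual check:
#   python getrect.py -i /path/to/image.png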
cd "${installPath}/pifuhd"
git apply - 2>/dev/null << 'EOM'
diff --git a/apps/recon.py b/apps/recon.py
index 8b2c98f..a5f75f3 100644
--- a/apps/recon.py
+++ b/apps/recon.py
@@ -145,7 +145,7 @@ def recon(opt, use_rect=False):
     state_dict = None
     if state_dict_path is not None and os.path.exists(state_dict_path):
         print('Resuming from ', state_dict_path)
-        state_dict = torch.load(state_dict_path)
+        state_dict = torch.load(state_dict_path, map_location=torch.device('cpu'))
         print('Warning: opt is overwritten.')
         dataroot = opt.dataroot
         resolution = opt.resolution
@@ -162,7 +162,7 @@ def recon(opt, use_rect=False):
     # parser.print_options(opt)
 
-    cuda = torch.device('cuda:%d' % opt.gpu_id)
+    cuda = torch.device('cpu')
 
     if use_rect:
         test_dataset = EvalDataset(opt)
     else:
EOM
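# The patch above only swaps the two torch.device calls over to CPU. It fails
# silently on re-runs (stderr is suppressed); to verify it applied you could
# inspect: git diff apps/recon.py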
cat >> ".gitignore" << EOM
content/
.vscode/
.venv/
*.pyc
results/
checkpoints/
EOM
cat > "run.sh" << EOM
#!/bin/bash
if [ -z "\$1" ]; then
echo "Missing parameter: $0 <image>" >&2
exit 1
fi
image="\$(realpath "\$1")"
dirImage="\$(dirname "\$image")"
source .venv/bin/activate
cd content/lightweight-human-pose-estimation.pytorch
python getrect.py -i "\$image"
cd ../..
python -m apps.simple_test -r 256 --use_rect -i "\$dirImage"
EOM
chmod +x run.sh
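# run.sh first generates the <image>_rect.txt crop file via getrect.py, then
# runs PIFuHD's simple_test app in --use_rect mode on the image's directory.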
mv sample_images/test_keypoints.json{,.bak} 2>/dev/null
echo "You can now go to ${installPath}/pifuhd/ and execute"
echo " run.sh samples_images/test.png"
echo "to check if it is working correctly."
echo ""
echo "Change the value in the last line in 'run.sh' to 512 if you have enough memory."
echo "You can find the results in results/pifuhd_final/recon/"
@DUMBANIKET

[screenshot attached]

I have an issue (I'm not good at AI/ML stuff). In my case I'm using an Ubuntu 20 server with 16 GB of RAM. I activated the virtual environment manually and all the packages installed properly, but I think something got messed up :)

@NicolasGoeddel (Author)

> I have an issue (I'm not good at AI/ML stuff). In my case I'm using an Ubuntu 20 server with 16 GB of RAM. I activated the virtual environment manually and all the packages installed properly, but I think something got messed up :)

Unfortunately I am not experienced in AI stuff either. I only managed to get it installed properly, and I haven't used this script since I created it. But it seems that the human pose estimation repo got an update and the demo parameter is no longer used, as you can see here: https://github.com/Daniil-Osokin/lightweight-human-pose-estimation.pytorch/blob/1590929b601535def07ead5522f05e5096c1b6ac/modules/keypoints.py#L64
It was already changed 3 years ago in this commit: Daniil-Osokin/lightweight-human-pose-estimation.pytorch@ee9e4cc

So if you are lucky, you can just delete the demo=True parameter in getrect.py on line 30 and try again.
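For anyone hitting the same error, a minimal sketch of that edit (untested; assuming the directory layout this script creates):

  cd pifuhd/content/lightweight-human-pose-estimation.pytorch
  sed -i 's/, pafs, demo=True)/, pafs)/' getrect.py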
