Last active
August 15, 2019 13:06
-
-
Save mukheshpugal/2c236ad9e77f0a54e6d76c2b454ca407 to your computer and use it in GitHub Desktop.
Pose estimation using scipy.optimize.least_squares()
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import math
import random

import cv2
import numpy as np
from matplotlib import pyplot as plt
from PIL import Image
from scipy.optimize import least_squares as ls
def huber(r, delta=1.0):
    """Huber norm of a squared residual.

    Quadratic for small residuals, linear for large ones, which caps the
    influence of photometric outliers on the least-squares fit.

    Parameters
    ----------
    r : float
        Squared residual (r >= 0); the regime test is done on sqrt(r).
    delta : float, optional
        Transition point between the quadratic and linear regimes.
        Defaulted so existing one-argument call sites (see poseOptimize)
        keep working.

    Returns
    -------
    float
        r / (2*delta) when sqrt(r) < delta, else sqrt(r) - delta/2.
    """
    if math.sqrt(r) < delta:
        return r / (2 * delta)
    # Bug fix: original used the bare name `sqrt` here (NameError at runtime).
    return math.sqrt(r) - delta / 2
def convertParamsToPose(eParams):
    """Build a 4x4 similarity-transform matrix from a 7-parameter vector.

    eParams layout: [scale, rx, ry, rz, tx, ty, tz], where rx/ry/rz are
    Euler angles in radians applied in the Rx @ Ry @ Rz order, and the
    rotation block is multiplied by `scale`.
    """
    scale, rx, ry, rz, tx, ty, tz = eParams
    cx, sx = math.cos(rx), math.sin(rx)
    cy, sy = math.cos(ry), math.sin(ry)
    cz, sz = math.cos(rz), math.sin(rz)
    rot_x = np.array([[1, 0, 0], [0, cx, -sx], [0, sx, cx]])
    rot_y = np.array([[cy, 0, sy], [0, 1, 0], [-sy, 0, cy]])
    rot_z = np.array([[cz, -sz, 0], [sz, cz, 0], [0, 0, 1]])
    rotation = np.matmul(np.matmul(rot_x, rot_y), rot_z)
    # Assemble [s*R | t] on top of the homogeneous row [0 0 0 1].
    upper = np.hstack((scale * rotation, np.array([[tx], [ty], [tz]])))
    return np.vstack((upper, [0, 0, 0, 1]))
def convertPoseToParams(pose):
    """Inverse of convertParamsToPose: recover [scale, rx, ry, rz, tx, ty, tz].

    Parameters
    ----------
    pose : ndarray
        4x4 similarity transform whose upper-left 3x3 block is scale * R,
        with R a rotation in the Rx @ Ry @ Rz Euler convention.

    Returns
    -------
    ndarray
        7-element vector [scale, rx, ry, rz, tx, ty, tz].
    """
    # Scale is the norm of the first row of the scaled rotation block
    # (rows of scale*R all have norm `scale`).
    scale = math.sqrt(pose[0][0] * pose[0][0] + pose[0][1] * pose[0][1] + pose[0][2] * pose[0][2])
    rotation = pose[:3, :3] / scale
    translation = pose[:3, 3]
    # Euler angles for the Rx @ Ry @ Rz factorization.
    rot = np.array([
        math.atan2(-rotation[1][2], rotation[2][2]),
        math.asin(rotation[0][2]),
        math.atan2(-rotation[0][1], rotation[0][0]),
    ])
    # Fix: removed leftover debug prints of the three segments' shapes.
    return np.concatenate((np.array([scale]), rot, translation))
def photometricResidual(u, i_k, dVal, i_t, pose, cameraIntrinsics): #dVal is the depth value of keyframe image at u
    """Intensity difference between a keyframe pixel and its reprojection.

    Back-projects pixel `u` of keyframe image `i_k` using its depth `dVal`,
    transforms the resulting 3D point by `pose`, projects it into the
    tracked image `i_t` with the same intrinsics, and returns the intensity
    difference i_k[u] - i_t[projected u].

    NOTE(review): cv2.convertPointsToHomogeneous expects an (N, 1, C) point
    array, while `u` here looks like a plain [row, col] pair and `proj2d`
    is indexed directly as integers — confirm the intermediate shapes and
    dtypes actually line up at runtime.
    """
    # K^-1 maps pixel coordinates to a normalized camera ray.
    intrinsics_inv = np.linalg.inv(cameraIntrinsics)
    # Ray scaled by depth -> 3D point in keyframe camera coordinates.
    vertexMap = np.matmul(intrinsics_inv, dVal * cv2.convertPointsToHomogeneous(u))
    # Promote to a homogeneous 4-vector so the 4x4 pose can be applied.
    vertexMap = cv2.convertPointsToHomogeneous(vertexMap)
    proj3d = np.matmul(pose, vertexMap)
    # Project into the tracked image. NOTE(review): this multiplies the 3x3
    # intrinsics against the 4-component transformed point — presumably only
    # the first three components should be used; verify.
    proj2d = np.matmul(cameraIntrinsics, proj3d)
    proj2d = cv2.convertPointsFromHomogeneous(proj2d)
    residual = i_k[u[0]][u[1]] - i_t[proj2d[0]][proj2d[1]]
    return residual
def variance(u, i_k, dVal, i_t, pose, cameraIntrinsics, invDepthVar):
    """Propagated variance of the photometric residual at pixel u.

    Numerically differentiates the residual with respect to depth (forward
    difference with step 0.01) and propagates the inverse-depth variance
    through that gradient. The image-noise term is currently fixed at zero.
    """
    r_plus = photometricResidual(u, i_k, dVal + 0.01, i_t, pose, cameraIntrinsics)
    r_base = photometricResidual(u, i_k, dVal, i_t, pose, cameraIntrinsics)
    depth_grad = (r_plus - r_base) / 0.01
    sigma_image = 0  # photometric (image) noise std-dev; zero in the original
    return sigma_image * sigma_image + depth_grad * depth_grad * invDepthVar
def poseOptimize(initGuess, image1, dMap, image2, intrinsics, accuracy, uncMap=None): #accuracy refers to the number of high gradient points for which the residual is to be calculated
    """Estimate the relative pose between a keyframe and a tracked image.

    Minimizes Huber-weighted, variance-normalized photometric residuals at
    randomly sampled high-gradient pixels using scipy least_squares.

    Parameters
    ----------
    initGuess : sequence of 7 floats
        Initial [scale, rx, ry, rz, tx, ty, tz] parameters.
    image1, image2 : ndarray
        Keyframe and tracked grayscale images.
    dMap : ndarray
        Per-pixel depth map of the keyframe.
    intrinsics : array_like
        3x3 camera intrinsic matrix.
    accuracy : int
        Number of high-gradient sample points per cost evaluation.
    uncMap : ndarray, optional
        Per-pixel inverse-depth variance map. The original referenced an
        undefined `uncMap`; defaults to unit variance when not supplied.

    Returns
    -------
    ndarray
        Optimized 7-parameter pose vector.
    """
    HUBER_DELTA = 1.0  # Huber transition point; original omitted the argument
    if uncMap is None:
        uncMap = np.ones_like(dMap, dtype=float)
    # High-gradient pixels carry the most photometric information.
    laplacian = cv2.Laplacian(image1, cv2.CV_64F)
    def costs(x):
        # Hoisted: the pose is loop-invariant for a given parameter vector.
        pose = convertParamsToPose(x)
        result = []
        count = 0
        while count < accuracy:
            # Rejection-sample a pixel with a strong Laplacian response.
            u = [math.floor(laplacian.shape[0] * random.random()), math.floor(laplacian.shape[1] * random.random())]
            if math.fabs(laplacian[u[0], u[1]]) > 30:
                # Fixes vs. original: `cameraIntrinsics` -> `intrinsics`,
                # invalid `count++` -> `count += 1`, np.hstack misuse ->
                # list append, huber() given an explicit delta.
                r_2 = math.pow(photometricResidual(u, image1, dMap[u[0]][u[1]], image2, pose, intrinsics), 2)
                var = variance(u, image1, dMap[u[0]][u[1]], image2, pose, intrinsics, uncMap[u[0]][u[1]])
                result.append(huber(r_2 / var, HUBER_DELTA))
                count += 1
        return np.array(result)
    # `ls` is the name least_squares is imported under at the top of the file.
    res_1 = ls(costs, initGuess)
    return res_1.x
# --- Demo: estimate the pose between a keyframe and a tracked frame ----------
# Pinhole intrinsics (fx, fy, cx, cy) = (525.0, 525.0, 319.5, 239.0).
intrinsicMat = np.array([[525.0, 0, 319.5], [0, 525.0, 239.0], [0, 0, 1]])
# Keyframe image + depth map, and the tracked image (tracked depth unused below).
kf = cv2.imread('kf.png', 0)
kf_d = cv2.imread('kf_d.png', 0)
it = cv2.imread('it.png', 0)
it_d = cv2.imread('it_d.png', 0)
# Optimize starting from the identity pose [scale, rx, ry, rz, tx, ty, tz].
result = poseOptimize([1, 0, 0, 0, 0, 0, 0], kf, kf_d, it, intrinsicMat, 100)
# Bug fix: `result` is a 7-parameter vector, so it must be expanded into a
# 4x4 matrix with convertParamsToPose; the original called convertPoseToParams,
# which expects a matrix and would fail on a flat vector.
endPose = convertParamsToPose(result)
print(endPose)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment