Last active
August 10, 2023 06:10
-
-
Save ashwanirathee/de006063818ddb8102b79ff9022b2be6 to your computer and use it in GitHub Desktop.
calibration work
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
### A Pluto.jl notebook ### | |
# v0.19.27 | |
using Markdown | |
using InteractiveUtils | |
# This Pluto notebook uses @bind for interactivity. When running this notebook outside of Pluto, the following 'mock version' of @bind gives bound variables a default value (instead of an error). | |
# Mock of Pluto's `@bind` so this notebook can run outside Pluto: bound
# variables get a default value instead of raising an error.
macro bind(def, element)
    quote
        # Look up AbstractPlutoDingetjes (by its UUID) among loaded modules;
        # fall back to a function returning `missing` for any element.
        local iv = try Base.loaded_modules[Base.PkgId(Base.UUID("6e696c72-6542-2067-7265-42206c756150"), "AbstractPlutoDingetjes")].Bonds.initial_value catch; b -> missing; end
        local el = $(esc(element))
        # Prefer the element's own default via `Base.get` when applicable,
        # otherwise use the package-provided initial value.
        global $(esc(def)) = Core.applicable(Base.get, el) ? Base.get(el) : iv(el)
        el
    end
end
# ╔═╡ 4934e4de-b03d-419f-a076-9a8116f5ddf5 | |
begin
    # Activate the project environment in the notebook's directory so the
    # package versions pinned by its Project.toml are used.
    using Pkg;
    Pkg.activate(".") ;
end
# ╔═╡ 15321a8a-0714-4f44-91a4-99a54c29e46f | |
# One-off install of PyCall into the active environment (only needed on first run).
Pkg.add("PyCall")
# ╔═╡ 14519106-d4cf-4a77-acca-a22b7c426334 | |
using Images, ImageDraw, LinearAlgebra, CoordinateTransformations, Statistics, FileIO, Dates | |
# ╔═╡ a51f6584-b4b6-494a-9456-3d4ef0d74112 | |
using PyCall | |
# ╔═╡ 11bb0034-d018-4417-9d76-e9509403d542 | |
"""
    as_ints(a::AbstractArray{CartesianIndex{L}})

Reinterpret an array of `CartesianIndex{L}` as a plain `Int` array with a new
leading dimension of length `L` (one row per index component).
"""
function as_ints(a::AbstractArray{CartesianIndex{L}}) where {L}
    flat = reinterpret(Int, a)
    return reshape(flat, (L, size(a)...))
end
# ╔═╡ c11e3475-1835-462f-bdc5-c3f2592b1980 | |
"$(Dates.datetime2epochms(now()))" | |
# ╔═╡ cb0ef50c-1f87-45f3-ad3f-339cc15098c1 | |
pwd() | |
# ╔═╡ f5642319-05ee-4731-ad26-80bcd4f6aa7b | |
begin | |
### Important | |
### Webcam related utils | |
function camera_input(;max_size=1280, default_url="https://i.imgur.com/SUmi94P.png") | |
""" | |
<span class="pl-image waiting-for-permission"> | |
<style> | |
.pl-image.popped-out { | |
position: fixed; | |
top: 0; | |
right: 0; | |
z-index: 5; | |
} | |
.pl-image #video-container { | |
width: 640px; | |
} | |
.pl-image video { | |
# border-radius: 1rem 1rem 0 0; | |
} | |
.pl-image.waiting-for-permission #video-container { | |
display: none; | |
} | |
.pl-image #prompt { | |
display: none; | |
} | |
.pl-image.waiting-for-permission #prompt { | |
width: 640px; | |
height: 360px; | |
display: grid; | |
place-items: center; | |
font-family: monospace; | |
font-weight: bold; | |
text-decoration: underline; | |
cursor: pointer; | |
border: 5px dashed rgba(0,0,0,.5); | |
} | |
.pl-image video { | |
display: block; | |
} | |
.pl-image .bar { | |
width: inherit; | |
display: flex; | |
z-index: 6; | |
} | |
.pl-image .bar#top { | |
position: absolute; | |
flex-direction: column; | |
} | |
.pl-image .bar#bottom { | |
background: black; | |
# border-radius: 0 0 1rem 1rem; | |
} | |
.pl-image .bar button { | |
flex: 0 0 auto; | |
background: rgba(255,255,255,.8); | |
border: none; | |
width: 2rem; | |
height: 2rem; | |
border-radius: 100%; | |
cursor: pointer; | |
z-index: 7; | |
} | |
.pl-image .bar button#shutter { | |
width: 3rem; | |
height: 3rem; | |
margin: -1.5rem auto .2rem auto; | |
} | |
.pl-image video.takepicture { | |
animation: pictureflash 0ms linear; | |
} | |
@keyframes pictureflash { | |
0% { | |
filter: grayscale(1.0) contrast(2.0); | |
} | |
100% { | |
filter: grayscale(0.0) contrast(1.0); | |
} | |
} | |
</style> | |
<div id="video-container"> | |
<div id="top" class="bar"> | |
<button id="stop" title="Stop video">✖</button> | |
<button id="pop-out" title="Pop out/pop in">⏏</button> | |
</div> | |
<video playsinline autoplay></video> | |
<div id="bottom" class="bar"> | |
<button id="shutter" title="Click to take a picture">📷</button> | |
</div> | |
</div> | |
<div id="prompt"> | |
<span> | |
Enable webcam | |
</span> | |
</div> | |
<script> | |
// based on https://github.com/fonsp/printi-static (by the same author) | |
const span = currentScript.parentElement | |
const video = span.querySelector("video") | |
const popout = span.querySelector("button#pop-out") | |
const stop = span.querySelector("button#stop") | |
const shutter = span.querySelector("button#shutter") | |
const prompt = span.querySelector(".pl-image #prompt") | |
const maxsize = $(max_size) | |
const send_source = (source, src_width, src_height) => { | |
const scale = Math.min(1.0, maxsize / src_width, maxsize / src_height) | |
// const width = Math.floor(src_width * scale) | |
// const height = Math.floor(src_height * scale) | |
const width = Math.floor(src_width) | |
const height = Math.floor(src_height) | |
const canvas = html`<canvas width=\${width} height=\${height}>` | |
const ctx = canvas.getContext("2d") | |
ctx.drawImage(source, 0, 0, width, height) | |
span.value = { | |
width: width, | |
height: height, | |
data: ctx.getImageData(0, 0, width, height).data, | |
} | |
span.dispatchEvent(new CustomEvent("input")) | |
} | |
const clear_camera = () => { | |
window.stream.getTracks().forEach(s => s.stop()); | |
video.srcObject = null; | |
span.classList.add("waiting-for-permission"); | |
} | |
prompt.onclick = () => { | |
navigator.mediaDevices.getUserMedia({ | |
audio: false, | |
video: { | |
facingMode: "environment", | |
}, | |
}).then(function(stream) { | |
stream.onend = console.log | |
window.stream = stream | |
video.srcObject = stream | |
window.cameraConnected = true | |
video.controls = false | |
video.play() | |
video.controls = false | |
span.classList.remove("waiting-for-permission"); | |
}).catch(function(error) { | |
console.log(error) | |
}); | |
} | |
stop.onclick = () => { | |
clear_camera() | |
} | |
popout.onclick = () => { | |
span.classList.toggle("popped-out") | |
} | |
var intervalId = window.setInterval(function(){ | |
const cl = video.classList | |
cl.remove("takepicture") | |
void video.offsetHeight | |
cl.add("takepicture") | |
video.play() | |
video.controls = false | |
send_source(video, video.videoWidth, video.videoHeight) | |
}, 150); | |
shutter.onclick = () => { | |
const cl = video.classList | |
cl.remove("takepicture") | |
void video.offsetHeight | |
cl.add("takepicture") | |
video.play() | |
video.controls = false | |
send_source(video, video.videoWidth, video.videoHeight) | |
} | |
document.addEventListener("visibilitychange", () => { | |
if (document.visibilityState != "visible") { | |
clear_camera() | |
} | |
}) | |
// Set a default image | |
const img = html`<img crossOrigin="anonymous">` | |
img.onload = () => { | |
console.log("helloo") | |
send_source(img, img.width, img.height) | |
} | |
img.src = "$(default_url)" | |
console.log(img) | |
</script> | |
</span> | |
""" |> HTML | |
end | |
"""
    process_raw_camera_data(raw_camera_data)

Convert the flat RGBA byte vector produced by the webcam widget into a 2-D
`Gray{N0f8}` image. Only the red channel is kept (gray ≈ red for this use).
"""
function process_raw_camera_data(raw_camera_data)
    # the raw image data is a long byte array, we need to transform it into something
    # more "Julian" - something with more _structure_.
    # The encoding of the raw byte stream is:
    # every 4 bytes is a single pixel
    # every pixel has 4 values: Red, Green, Blue, Alpha
    # (we ignore alpha for this notebook)
    # So to get the red values for each pixel, we take every 4th value, starting at
    # the 1st:
    reds_flat = UInt8.(raw_camera_data["data"][1:4:end])
    # greens_flat = UInt8.(raw_camera_data["data"][2:4:end])
    # blues_flat = UInt8.(raw_camera_data["data"][3:4:end])
    # but these are still 1-dimensional arrays, nicknamed 'flat' arrays
    # We will 'reshape' this into 2D arrays:
    width = raw_camera_data["width"]
    height = raw_camera_data["height"]
    # Data arrives row-major from JS; Julia is column-major, hence the
    # (width, height) reshape followed by a transpose. /255 maps bytes to [0,1].
    reds = reshape(reds_flat, (width, height))' / 255.0
    # greens = reshape(greens_flat, (width, height))' / 255.0
    # blues = reshape(blues_flat, (width, height))' / 255.0
    # we have our 2D array for each color
    # Let's create a single 2D array, where each value contains the R, G and B value of
    # that pixel
    # RGB.(reds, greens, blues)
    Gray{N0f8}.(reds)
end
end | |
# ╔═╡ 1a0324de-ee19-11ea-1d4d-db37f4136ad3 | |
@bind raw_camera_data camera_input(;max_size=100) | |
# ╔═╡ f2236406-af64-403d-84bd-e3afe395b791 | |
html"""<style> | |
main { | |
max-width: 900px; | |
} | |
""" | |
# ╔═╡ 3687d6ca-5cd2-4e6b-b0b1-7c7bbc791870 | |
A = [1 2 3;4 1 6;7 8 1] | |
# ╔═╡ 25d1a770-a2cc-4fcc-8e95-c240b2dff67d | |
A' | |
# ╔═╡ e2516aef-c97e-40e6-bf14-26dd24fe8e42 | |
tr(A) # sum along diagonal | |
# ╔═╡ a092c79d-f568-4942-88cf-060770324cd0 | |
det(A) | |
# ╔═╡ 0637aecb-0d91-428b-a968-e05677bf83a8 | |
inv(A) | |
# ╔═╡ 63685547-1b8f-4a81-a08b-b4cd037188af | |
eigvals(A) | |
# ╔═╡ f0184dae-b562-469e-8263-63a565fea9cb | |
eigvecs(A) | |
# ╔═╡ 9148b86f-62c1-43d8-9b3a-99f293005f59 | |
d = factorize(A) | |
# ╔═╡ 1e4cc89e-585e-4803-8abc-f2cad3832877 | |
d.factors | |
# ╔═╡ 834d4ec1-5684-4302-b79c-993971ee870d | |
d.ipiv | |
# ╔═╡ 6da6035a-4815-42b1-914f-ca8ebe276324 | |
d.info | |
# ╔═╡ ee6d050b-b2b0-4f2e-b611-a9a427e1c6b7 | |
[1 1 3;4 5 6;7 8 9] * [1;2;3] | |
# ╔═╡ f9f91642-5619-4521-a47c-26a628c7e9a5 | |
begin
    # Scratch notes on the pinhole-camera projection model.
    # NOTE(review): `f`, `x_c`/`y_c`/`z_c`, `m_x`/`m_y`, `o_x`/`o_y`, the
    # r??/t_? entries and `X_c`/`X_w` are not defined anywhere in this
    # notebook; this cell is derivation scratch work and will not run until
    # they are supplied.
    x_i = f * (x_c / z_c)   # perspective projection onto the image plane
    y_i = f * (y_c / z_c)
    # m_x, m_y: pixel densities (pixels/mm); o_x, o_y: principal point
    u = m_x * x_i + o_x
    v = m_y * y_i + o_y
    f_x = m_x * f           # focal length expressed in pixels
    f_y = m_y * f
    # Intrinsics (f_x, f_y, o_x, o_y) define the camera's internal geometry;
    # the perspective-projection equations are nonlinear, hence the move to
    # homogeneous coordinates below.

    """
        homogen2to3(u, v)

    Lift a 2-D point to homogeneous coordinates using scale `w_t = 2`.
    """
    function homogen2to3(u::AbstractFloat, v::AbstractFloat)
        # Fixed from the original `::Float`, which is not a Julia type.
        w_t = 2
        return u * w_t, v * w_t, w_t
    end

    """
        homogen3to4(u, v, z)

    Lift a 3-D point to homogeneous coordinates using scale `w_t = 2`.
    """
    function homogen3to4(u::AbstractFloat, v::AbstractFloat, z::AbstractFloat)
        w_t = 2
        return u * w_t, v * w_t, z * w_t, w_t
    end

    # Typo fixes from the original: `0_y` (invalid numeric literal) -> `o_y`,
    # and `entrinsic_mtr` (undefined name) -> `extrinsic_mtr`.
    intrinsic_mtr = [f_x 0 o_x 0; 0 f_y o_y 0; 0 0 1 0]
    extrinsic_mtr = [r11 r12 r13 t_x; r21 r22 r23 t_y; r31 r32 r33 t_z; 0 0 0 1]
    u = intrinsic_mtr * X_c            # camera frame -> pixel coordinates
    X_c = extrinsic_mtr * X_w          # world frame -> camera frame
    u = intrinsic_mtr * extrinsic_mtr * X_w
    P = intrinsic_mtr * extrinsic_mtr  # full 3x4 projection matrix
end
# ╔═╡ 5fb9ad50-9647-4a55-9e4c-ba4246b43b80 | |
b = [2; 3; 4; 1]/[2; 3; 1] | |
# ╔═╡ 914393a3-4bf0-4771-b880-9e1939fd8873 | |
Q,R = qr(b) | |
# ╔═╡ e6514a59-8e71-471b-be7d-d43554d09dcb | |
c = [2; 3; 4; 1] \ [2; 3; 1] | |
# ╔═╡ b258a32c-539f-44b4-babf-6e83a81be703 | |
begin | |
"""
    innercorners(length::Int, width::Int)

Return the number of inner corners on a checkerboard of `length` x `width`
squares.
"""
function innercorners(nrows::Int, ncols::Int)
    return (nrows - 1) * (ncols - 1)
end
"""
    allcorners(length::Int, width::Int)

Return the total number of corners on a checkerboard of `length` x `width`
squares, including those on the outer board boundary.
"""
allcorners(nrows::Int, ncols::Int) = (nrows + 1) * (ncols + 1)
"""
    drawdots!(img, res, color; size = 5)

Paint a filled `(2size+1) x (2size+1)` square of `color` around each
`CartesianIndex` in `res`, mutating `img` in place and returning it.

Bug fix: the original was missing the comma between the two index ranges
(`img[a:b c:d]`), which is a syntax error. NOTE(review): indices closer than
`size` to the image border still throw a `BoundsError`.
"""
function drawdots!(img, res, color; size = 5)
    for c in res
        img[c[1]-size:c[1]+size, c[2]-size:c[2]+size] .= color
    end
    return img
end
# Draw a hollow square of half-side `size` centred on each CartesianIndex in
# `dots` using an ImageDraw rectangle polygon; mutates `img`.
# ImageDraw points take (x, y) = (column, row), hence i[2] before i[1].
function draw_rect(img, dots, color = Gray{N0f8}(1); size = 5)
    for i in dots
        # img[i[1]-5:i[1]+5, i[2]-5:i[2]+5] .= color
        ImageDraw.draw!(
            img,
            ImageDraw.Polygon(
                RectanglePoints(
                    ImageDraw.Point(i[2] - size, i[1] - size),
                    ImageDraw.Point(i[2] + size, i[1] + size),
                ),
            ),
            color,
        )
    end
end
"""
    kxkneighboardhood(chessboard, refined1; stdatol = 0.1, cortol = 0.6, n = 25, m = 11, k = 13)

Keep only the candidate corners in `refined1` whose `(2n+1) x (2n+1)` patch of
`chessboard` correlates strongly (>= 0.7) with its own fully reversed copy --
a point-symmetry test that true checkerboard X-corners satisfy. Returns the
filtered vector of candidates.

NOTE(review): `stdatol`, `cortol`, `m` and `k` are currently unused (the
std/correlation checks they belong to are commented out below). Patches are
read without bounds checking, so candidates within `n` pixels of the image
border will throw a `BoundsError` -- TODO confirm callers guarantee a margin
(`clearborder(…, 35)` in `process_image` suggests they do).
"""
function kxkneighboardhood(
    chessboard,
    refined1;
    stdatol = 0.1,
    cortol = 0.6,
    n = 25,
    m = 11,
    k = 13,
)
    reut = zeros(Bool, length(refined1))  # keep-mask, parallel to refined1
    board = Float32.(chessboard)
    # @info board
    for (idx, i) in enumerate(refined1)
        x, y = i[1], i[2]
        # std1 = !isapprox(std(p1), std(p2); atol = stdatol)
        # std2 = !isapprox(std(p3), std(p4); atol = stdatol)
        # cor1 = 10^((cor(vec(p1), vec(reverse(p2))) / 0.8) -1) > cortol
        # cor2 = 10^((cor(vec(p3), vec(reverse(p4))) / 0.8) -1) > cortol
        # if std1 || std2 || !cor1 || !cor2
        # continue
        # end
        imgtest = board[x-n:x+n, y-n:y+n]
        # correlation of the patch with its 180-degree-reversed self
        res = cor(vec(imgtest), vec(reverse(imgtest)))
        # if std1 || std2 || res < 0.7
        # continue
        # end
        if res < 0.7
            continue
        end
        reut[idx] = true
    end
    # drop rejected candidates, keeping the accepted ones in order
    refined2 = map((x, y) -> y ? x : nothing, refined1, reut)
    refined3 = filter(x -> x !== nothing, refined2)
end
"""
    nonmaxsuppresion(refined3)

Cluster corner candidates lying within 10 px of each other and replace each
cluster with the (floored) mean position -- a mean-based non-maximum
suppression. Returns a vector of `CartesianIndex` cluster centres.
"""
function nonmaxsuppresion(refined3)
    checked = Set([])   # candidate indices already absorbed into a cluster
    final1 = []
    # Euclidean distance between two index-like points.
    function dista(i, j)
        sqrt((i[1] - j[1])^2 + (i[2] - j[2])^2)
    end
    for (idxi, i) in enumerate(refined3)
        if idxi ∉ checked
            dist = []   # indices of the cluster around candidate i
            for (idxj, j) in enumerate(refined3)
                if idxj ∉ checked
                    dist1 = dista(i, j)
                    if dist1 < 10
                        push!(dist, idxj)
                    end
                end
            end
            # Accumulate cluster coordinates; the seed zero row from
            # `zeros(1, 2)` stays last and is dropped by `1:end-1` below.
            local mat = zeros(1, 2)
            for i in dist
                i = refined3[i]
                mat = vcat([i[1] i[2]], mat)
            end
            res = Int64.(floor.(mean(mat[1:end-1, :]; dims = 1)))
            map(d -> push!(checked, d), dist)
            value = CartesianIndex(res[1], res[2])
            push!(final1, value)
        end
    end
    final1
end
"""
    markcorners(img::AbstractArray; method = harris, crthres = Percentile(99), LoGparams = 2.0 .^ [3.0], filter = (5, 5), returnimg = false)

Detect checkerboard corner blobs in `img`: run `imcorner`, despeckle the
corner mask with a median window plus dilation, then localise blobs with
`blob_LoG`.

### Arguments
- `img`: image to be processed
- `method`: corner-response function passed to `imcorner`
- `crthres`: percentile threshold for the `imcorner` response
- `LoGparams`: scales for the LoG blob detector
- `filter`: window size for `mapwindow`
- `returnimg`: if `true`, also return an image with the corners marked

Returns the vector of blob locations, plus the marked image when
`returnimg == true`.
"""
function markcorners(
    img::AbstractArray;
    method = harris,
    crthres = Percentile(99),
    LoGparams = 2.0 .^ [3.0],
    filter = (5, 5),
    returnimg = false,
)
    # Bug fix: the original passed `method = harris` here, silently ignoring
    # the caller's `method` keyword argument.
    imagecorners = imcorner(img, crthres; method = method)
    # remove isolated false positives, then thicken surviving responses
    img_cleaned = dilate(mapwindow(median!, (Gray.(imagecorners)), filter))
    results = blob_LoG(Int64.(img_cleaned), LoGparams)
    if returnimg == true
        resultantimage = zeros(size(img))
        map(x -> resultantimage[x.location] = 1, results)
        return map(x -> x.location, results), Gray.(resultantimage)
    else
        return map(x -> x.location, results)
    end
end
"""
    segboundariescheck(imgs; numcheck = 4)

For each binary patch in `imgs`, walk the outer boundary clockwise (top row,
right column, reversed bottom row, reversed left column) and count the exact
0<->1 transitions. An ideal checkerboard X-corner patch, e.g.

    000111
    000111
    111000
    111000

yields a boundary like `000111100001111` with exactly four changes. Returns a
`Bool` vector marking the patches whose transition count equals `numcheck`.
"""
function segboundariescheck(imgs; numcheck = 4)
    check = zeros(Bool, length(imgs))
    for (idx, patch) in enumerate(imgs)
        boundary = vcat(
            patch[1, :],
            patch[:, end],
            reverse(patch[end, :]),
            reverse(patch[:, 1]),
        )
        # count only exact 0<->1 flips, as the original did
        transitions = count(
            k -> (boundary[k] == 1 && boundary[k-1] == 0) ||
                 (boundary[k] == 0 && boundary[k-1] == 1),
            2:length(boundary),
        )
        check[idx] = (transitions == numcheck)
    end
    return check
end
"""
    checkboundaries(checkerboard, cords; pixels = [11, 23, 35])

Keep the corner candidates in `cords` that pass `segboundariescheck` for at
least one of the square neighbourhood sizes in `pixels` (regions centred on
each candidate). The image is binarised at 0.4 first (assumed grayscale).

### Arguments
- `checkerboard`: checkerboard image to be processed
- `cords`: vector of `CartesianIndex` corner candidates
- `pixels`: neighbourhood edge lengths to test, centred at each candidate
"""
function checkboundaries(checkerboard, cords; pixels = [11, 23, 35])
    passed = zeros(Bool, length(cords))
    binarized = checkerboard .> 0.4   # assumes a grayscale image
    for edge in pixels
        half = Int(floor((edge - 1) / 2)) - 1
        patches = map(c -> binarized[c[1]-half:c[1]+half, c[2]-half:c[2]+half], cords)
        ok = segboundariescheck(patches)
        # OR-accumulate across neighbourhood sizes
        passed = map(x -> (x > 0) ? true : false, passed .+ ok)
    end
    kept = map((c, keep) -> keep ? c : nothing, cords, passed)
    return filter(c -> c !== nothing, kept)
end
"""
    process_image(chessboard)

Full checkerboard corner pipeline: Harris corners -> border clearing ->
patch-symmetry filter (`kxkneighboardhood`) -> boundary-transition filter
(`checkboundaries`) -> mean-based non-max suppression. Returns the final
corner positions as a `Vector{CartesianIndex{2}}`.
"""
function process_image(chessboard)
    # we need a algorithm to check if there is a checkerboard or not in image
    # still need to study how filters from ImageFiltering.jl can improve results
    imagecorners = imcorner(chessboard, Percentile(99); method = Images.harris)
    # imagecorners = fastcorners(chessboard, 11, 0.20) # still gotta check if this is worth it
    imagecorners = clearborder(imagecorners, 35) # 35 is the boundary width we change
    # collect the CartesianIndex of every surviving corner response
    results =
        map(x -> imagecorners[x] == true ? x : nothing, CartesianIndices(imagecorners))
    results = filter(x -> x !== nothing, results)
    correlationcheck = kxkneighboardhood(chessboard, results;)
    bounds = checkboundaries(chessboard, correlationcheck; pixels = [11, 23, 35])
    # also we need algorithm for checking if we have a board now, with connected components still
    # also we need algorithm for checking if we have outliers and remove them if they exist
    finalcorners = nonmaxsuppresion(bounds) # return checkboard points
    return Vector{CartesianIndex{2}}(finalcorners)
end
end | |
# ╔═╡ dfb7c6be-ee0d-11ea-194e-9758857f7b20 | |
begin | |
### Important
### Object tracker is here

# Segment an object by HSV-window thresholding and report its convexity
# defects. Returns `(visualisation_image, defect_count)`; the count is -1
# when the hull/defect computation throws (e.g. no contour found).
# Upper bounds (h, s, v) and lower bounds (lh, ls, lv) use OpenCV-style
# 0-255 scaling for S and V, hence the `/ 255` below.
function objecttracker(
    img,
    h = 50,
    s = 255,
    v = 255,
    lh = 0,
    ls = 20,
    lv = 70,
    boundingboxvar = 10
)
    hsv_img = HSV.(img)
    channels = channelview(float.(hsv_img))
    hue_img = channels[1, :, :]
    val_img = channels[3, :, :]
    satur_img = channels[2, :, :]
    mask = zeros(size(hue_img))
    h, s, v = h, s, v
    h1, s1, v1 = lh, ls, lv
    ex = boundingboxvar  # NOTE(review): unused -- intended bounding-box margin?
    # build the binary mask of pixels inside the HSV window
    for ind in eachindex(hue_img)
        if hue_img[ind] <= h && satur_img[ind] <= s / 255 && val_img[ind] <= v / 255
            if hue_img[ind] >= h1 && satur_img[ind] >= s1 / 255 && val_img[ind] >= v1 / 255
                mask[ind] = 1
            end
        end
    end
    # despeckle the mask, then trace its contours
    img = mapwindow(ImageFiltering.median, dilate(mask), (3, 3))
    contours = find_contours(img)
    try
        convhull = convexhull(img .> 0.5)
        push!(convhull, convhull[1])  # close the hull polygon
        res = findconvexitydefects(contours[1], convhull; dist=3, absdiff = 2, currsize= 30, mindist =6)
        img_convex1 = RGB{N0f8}.(ones(size(img)))
        drawdots!(img_convex1, res, RGB(0,0,1))
        draw!(img_convex1, ImageDraw.Path(convhull), RGB(0))
        draw_contours(img_convex1, RGB(0), contours)
        return img_convex1, size(res)[1]
    catch e
        # fall back to drawing only the contours when defects cannot be found
        img_convex1 = RGB{N0f8}.(ones(size(img)))
        draw_contours(img_convex1, RGB(0), contours)
        return img_convex1 , -1
        # return Gray.(img), e
        # return Gray.(img) , 0
    end
end;
end | |
# ╔═╡ 594acafd-01d4-4eee-b9e6-5b886953b5b1 | |
begin
    # Grab the current webcam frame, detect checkerboard corners, and archive
    # the frame (named by an epoch-ms timestamp) when a full board -- 63 inner
    # corners, i.e. a 7x9 pattern -- is found.
    image = process_raw_camera_data(raw_camera_data)
    res = process_image(image)
    data_folder = "images/"
    if size(res)[1] == 63
        Images.save(data_folder * "$(Dates.datetime2epochms(now())).png", image)
        draw_rect(image, res, Gray(1))
    end
end
# ╔═╡ b2c29b9f-45ad-4736-ba49-49011b8bd7d7 | |
img = reverse(load("images\\63857712960954.png");dims=2) | |
# ╔═╡ 7a80d3c9-050a-421e-8a24-3a86ded52b3a | |
image1 = deepcopy(img) | |
# ╔═╡ 9a14c78a-f90f-4049-8397-cfd228230e1a | |
output = process_image(image1) | |
# ╔═╡ 37f2c3e5-21dc-44ec-b780-4389c9d22d2b | |
x = sort(output) | |
# ╔═╡ 0ec97ce8-ffc6-489e-a635-1e96b630cf8e | |
begin | |
draw_rect(image1, sort(output)[15:21], Gray(1)) | |
image1 | |
end | |
# ╔═╡ 1d4495d7-2fa5-4b45-bbe4-00a5a2bb4d16 | |
[-X -Y -1 0 0 0 u*X u*Y u; 0 0 0 -X -Y -1 v*X v*Y v] | |
# ╔═╡ c0288129-85c4-4ed7-8553-bbb91b655d8f | |
126/2 | |
# ╔═╡ a356ec89-ba9c-434c-8fb9-258eb0e3a382 | |
begin
    # Build the DLT system A.h = 0 from the sorted corner list `x`: each
    # 2-D/3-D correspondence contributes two rows (cf. the matrix cell above).
    # NOTE(review): assumes a 7-corners-per-row board with 20 mm squares, and
    # (u, v) = (i[1], i[2]); verify against the image row/column convention.
    data = []
    for (idx, i) in enumerate(x)
        # @info idx i
        u = i[1]
        v = i[2]
        X = 20 * (round(Int, idx / 7) + 1)  # world X of this corner (mm)
        Y = 20 * (idx % 7)                  # world Y of this corner (mm)
        res = [-X -Y -1 0 0 0 u*X u*Y u; 0 0 0 -X -Y -1 v*X v*Y v]
        push!(data, res)
    end
    Amat = vcat(data...)
    u2, s, vh = svd(Amat)
    min_idx = findmin(s)[2]
    @info min_idx
    # Bug fix: Julia's `svd` yields V (not Vᵀ as NumPy does), so the null
    # vector of A is the COLUMN of V for the smallest singular value, not the
    # row the original took.
    vh[:, min_idx]
end
# ╔═╡ 4e62057e-bf95-4701-805f-63085487e4f1 | |
py""" | |
from __future__ import print_function, division | |
import os | |
import glob | |
import sys, argparse | |
import pprint | |
import numpy as np | |
import cv2 | |
from scipy import optimize as opt | |
np.set_printoptions(suppress=True) | |
puts = pprint.pprint | |
DATA_DIR = "/home/kushal/KV/IP/calib_linear/data/" | |
DEBUG_DIR = "/home/kushal/KV/IP/calib_linear/data/debug/" | |
PATTERN_SIZE = (9, 6) | |
SQUARE_SIZE = 1.0 # | |
def show_image(string, image):
    # Display `image` in a window titled `string` and block until a key press.
    cv2.imshow(string, image)
    cv2.waitKey()
def get_camera_images():
    """Yield (path, grayscale image) for every *.jpg in DATA_DIR, sorted by name."""
    images = [each for each in glob.glob(DATA_DIR + "*.jpg")]
    images = sorted(images)
    for each in images:
        yield (each, cv2.imread(each, 0))
        # yield [(each, cv2.imread(each, 0)) for each in images]
def getChessboardCorners(images=None, visualize=False):
    """Detect chessboard corners in every calibration image.

    Returns a list of [image_points, object_points] integer-array pairs, one
    per image in which the full PATTERN_SIZE board was found. When
    `visualize` is set, annotated images are written to DEBUG_DIR.
    """
    # World coordinates of the board corners on the Z = 0 plane.
    objp = np.zeros((PATTERN_SIZE[1]*PATTERN_SIZE[0], 3), dtype=np.float64)
    # objp[:,:2] = np.mgrid[0:PATTERN_SIZE[1], 0:PATTERN_SIZE[0]].T.reshape(-1, 2)
    objp[:, :2] = np.indices(PATTERN_SIZE).T.reshape(-1, 2)
    objp *= SQUARE_SIZE
    chessboard_corners = []
    image_points = []
    object_points = []
    correspondences = []
    ctr = 0
    for (path, each) in get_camera_images():  # images:
        print("Processing Image : ", path)
        ret, corners = cv2.findChessboardCorners(each, patternSize=PATTERN_SIZE)
        if ret:
            print("Chessboard Detected ")
            corners = corners.reshape(-1, 2)
            if corners.shape[0] == objp.shape[0]:
                image_points.append(corners)
                # Append only World_X, World_Y: World_Z is zero. Just a simple
                # modification for get_normalization_matrix.
                object_points.append(objp[:, :-1])
                assert corners.shape == objp[:, :-1].shape, "mismatch shape corners and objp[:,:-1]"
                # Bug fix: `np.int` was removed in NumPy 1.24; the builtin
                # `int` has the same semantics here.
                correspondences.append([corners.astype(int), objp[:, :-1].astype(int)])
            if visualize:
                # Draw and display the corners
                ec = cv2.cvtColor(each, cv2.COLOR_GRAY2BGR)
                cv2.drawChessboardCorners(ec, PATTERN_SIZE, corners, ret)
                cv2.imwrite(DEBUG_DIR + str(ctr)+".png", ec)
        else:
            print("Error in detection points", ctr)
        ctr += 1
        # sys.exit(1)
    return correspondences
def normalize_points(chessboard_correspondences):
    """Normalize image/object correspondences per view (Zhang's method).

    For every view, build similarity transforms that move each point set's
    centroid to the origin with per-axis variance 2, apply them to
    homogeneous copies of the points, and return one tuple per view:
    (imp, objp, normalized_imp, normalized_objp, N_u, N_x, N_u_inv, N_x_inv).
    """
    views = len(chessboard_correspondences)

    def get_normalization_matrix(pts, name="A"):
        # Similarity transform: translate centroid to origin, scale each axis
        # so its variance becomes 2. Returns (N, N_inv).
        pts = pts.astype(np.float64)
        x_mean, y_mean = np.mean(pts, axis=0)
        var_x, var_y = np.var(pts, axis=0)
        s_x, s_y = np.sqrt(2/var_x), np.sqrt(2/var_y)
        print("Matrix: {4} : meanx {0}, meany {1}, varx {2}, vary {3}, sx {5}, sy {6} ".format(x_mean, y_mean, var_x, var_y, name, s_x, s_y))
        n = np.array([[s_x, 0, -s_x*x_mean], [0, s_y, -s_y*y_mean], [0, 0, 1]])
        n_inv = np.array([[1./s_x, 0, x_mean], [0, 1./s_y, y_mean], [0, 0, 1]])
        return n.astype(np.float64), n_inv.astype(np.float64)

    ret_correspondences = []
    # Bug fix: `xrange` does not exist in Python 3 (this file runs under
    # PyCall / Python 3); use `range`.
    for i in range(views):
        imp, objp = chessboard_correspondences[i]
        N_x, N_x_inv = get_normalization_matrix(objp, "A")
        N_u, N_u_inv = get_normalization_matrix(imp, "B")
        # convert imp, objp to homogeneous column vectors
        hom_imp = np.array([[[each[0]], [each[1]], [1.0]] for each in imp])
        hom_objp = np.array([[[each[0]], [each[1]], [1.0]] for each in objp])
        normalized_hom_imp = hom_imp
        normalized_hom_objp = hom_objp
        for i in range(normalized_hom_objp.shape[0]):
            # all points are homogeneous; renormalize so w == 1
            n_o = np.matmul(N_x, normalized_hom_objp[i])
            normalized_hom_objp[i] = n_o/n_o[-1]
            n_u = np.matmul(N_u, normalized_hom_imp[i])
            normalized_hom_imp[i] = n_u/n_u[-1]
        normalized_objp = normalized_hom_objp.reshape(normalized_hom_objp.shape[0], normalized_hom_objp.shape[1])
        normalized_imp = normalized_hom_imp.reshape(normalized_hom_imp.shape[0], normalized_hom_imp.shape[1])
        normalized_objp = normalized_objp[:, :-1]
        normalized_imp = normalized_imp[:, :-1]
        ret_correspondences.append((imp, objp, normalized_imp, normalized_objp, N_u, N_x, N_u_inv, N_x_inv))
    return ret_correspondences
def compute_view_based_homography(correspondence, reproj = False): | |
""" | |
# ╔═╡ 55f91b70-afa8-4af8-bc65-b62a13cf874e | |
correspondence = (imp, objp, normalized_imp, normalized_objp, N_u, N_x, N_u_inv, N_x_inv) | |
# ╔═╡ 420b7065-fb2d-494c-8d3f-294dab55e777 | |
""" | |
image_points = correspondence[0] | |
object_points = correspondence[1] | |
normalized_image_points = correspondence[2] | |
normalized_object_points = correspondence[3] | |
N_u = correspondence[4] | |
N_x = correspondence[5] | |
N_u_inv = correspondence[6] | |
N_x_inv = correspondence[7] | |
N = len(image_points) | |
print("Number of points in current view : ", N) | |
M = np.zeros((2*N, 9), dtype=np.float64) | |
print("Shape of Matrix M : ", M.shape) | |
print("N_model\n", N_x) | |
print("N_observed\n", N_u) | |
# create row wise allotment for each 0-2i rows | |
# that means 2 rows.. | |
for i in xrange(N): | |
X, Y = normalized_object_points[i] #A | |
u, v = normalized_image_points[i] #B | |
row_1 = np.array([ -X, -Y, -1, 0, 0, 0, X*u, Y*u, u]) | |
row_2 = np.array([ 0, 0, 0, -X, -Y, -1, X*v, Y*v, v]) | |
M[2*i] = row_1 | |
M[(2*i) + 1] = row_2 | |
print ("p_model {0} \t p_obs {1}".format((X, Y), (u, v))) | |
# M.h = 0 . solve system of linear equations using SVD | |
u, s, vh = np.linalg.svd(M) | |
print("Computing SVD of M") | |
# print("U : Shape {0} : {1}".format(u.shape, u)) | |
# print("S : Shape {0} : {1}".format(s.shape, s)) | |
# print("V_t : Shape {0} : {1}".format(vh.shape, vh)) | |
# print(s, np.argmin(s)) | |
h_norm = vh[np.argmin(s)] | |
h_norm = h_norm.reshape(3, 3) | |
# print("Normalized Homography Matrix : \n" , h_norm) | |
print(N_u_inv) | |
print(N_x) | |
# h = h_norm | |
h = np.matmul(np.matmul(N_u_inv,h_norm), N_x) | |
# if abs(h[2, 2]) > 10e-8: | |
h = h[:,:]/h[2, 2] | |
print("Homography for View : \n", h ) | |
if reproj: | |
reproj_error = 0 | |
for i in range(len(image_points)): | |
t1 = np.array([[object_points[i][0]], [object_points[i][1]], [1.0]]) | |
t = np.matmul(h, t1).reshape(1, 3) | |
t = t/t[0][-1] | |
formatstring = "Imp {0} | ObjP {1} | Tx {2}".format(image_points[i], object_points[i], t) | |
print(formatstring) | |
reproj_error += np.sum(np.abs(image_points[i] - t[0][:-1])) | |
reproj_error = np.sqrt(reproj_error/N)/100.0 | |
print("Reprojection error : ", reproj_error) | |
return h | |
def minimizer_func(initial_guess, X, Y, h, N):
    """Squared projection residuals for homography refinement.

    X: flattened normalized object points; Y: flattened normalized image
    points; h: flattened 3x3 homography; N: number of points. Returns the
    elementwise squared absolute difference between the object points
    projected through h and Y (length 2N).
    """
    x_j = X.reshape(N, 2)
    # Bug fix: `xrange` -> `range` (Python 3).
    projected = [0 for i in range(2*N)]
    for j in range(N):
        x, y = x_j[j]
        w = h[6]*x + h[7]*y + h[8]
        projected[2*j] = (h[0] * x + h[1] * y + h[2]) / w
        projected[2*j + 1] = (h[3] * x + h[4] * y + h[5]) / w
    return (np.abs(projected - Y))**2
def jac_function(initial_guess, X, Y, h, N):
    """Analytic Jacobian of the projection residuals w.r.t. the 9 entries of h."""
    pts = X.reshape(N, 2)
    jacobian = np.zeros((2*N, 9), np.float64)
    for row, (x, y) in enumerate(pts):
        sx = np.float64(h[0]*x + h[1]*y + h[2])
        sy = np.float64(h[3]*x + h[4]*y + h[5])
        w = np.float64(h[6]*x + h[7]*y + h[8])
        jacobian[2*row] = np.array([x/w, y/w, 1/w, 0, 0, 0, -sx*x/w**2, -sx*y/w**2, -sx/w**2])
        jacobian[2*row + 1] = np.array([0, 0, 0, x/w, y/w, 1/w, -sy*x/w**2, -sy*y/w**2, -sy/w**2])
    return jacobian
def refine_homographies(H, correspondences, skip=False):
    """Refine a view homography with Levenberg-Marquardt least squares.

    H: initial 3x3 homography; `correspondences` is the per-view tuple
    produced by normalize_points. Returns the refined, rescaled homography,
    or H unchanged when `skip` is true.
    """
    if skip:
        return H
    # Bug fix: the original read from an undefined global `correspondence`
    # (NameError at runtime); use the `correspondences` parameter.
    image_points = correspondences[0]
    object_points = correspondences[1]
    normalized_image_points = correspondences[2]
    normalized_object_points = correspondences[3]
    N_u = correspondences[4]
    N_x = correspondences[5]
    N_u_inv = correspondences[6]
    N_x_inv = correspondences[7]
    N = normalized_object_points.shape[0]
    X = object_points.flatten()
    Y = image_points.flatten()
    h = H.flatten()
    h_prime = opt.least_squares(fun=minimizer_func, x0=h, jac=jac_function, method="lm", args=[X, Y, h, N], verbose=0)
    if h_prime.success:
        H = h_prime.x.reshape(3, 3)
    H = H/H[2, 2]
    return H
def get_intrinsic_parameters(H_r):
    """Recover the intrinsic matrix A from refined homographies (Zhang).

    Builds the 2M x 6 constraint system V.b = 0 from each homography, solves
    for b (the image of the absolute conic) via SVD, then extracts
    alpha, beta, gamma, uc, vc in closed form.
    """
    M = len(H_r)
    V = np.zeros((2*M, 6), np.float64)

    def v_pq(p, q, H):
        # Zhang's v_pq constraint vector built from homography columns p, q.
        v = np.array([
            H[0, p]*H[0, q],
            H[0, p]*H[1, q] + H[1, p]*H[0, q],
            H[1, p]*H[1, q],
            H[2, p]*H[0, q] + H[0, p]*H[2, q],
            H[2, p]*H[1, q] + H[1, p]*H[2, q],
            H[2, p]*H[2, q]
        ])
        return v

    # Each homography contributes the orthogonality and equal-norm constraints.
    for i in range(M):
        H = H_r[i]
        V[2*i] = v_pq(p=0, q=1, H=H)
        V[2*i + 1] = np.subtract(v_pq(p=0, q=0, H=H), v_pq(p=1, q=1, H=H))
    # solve V.b = 0: b is the right singular vector of the smallest singular value
    u, s, vh = np.linalg.svd(V)
    # print(u, "\n", s, "\n", vh)
    b = vh[np.argmin(s)]
    print("V.b = 0 Solution : ", b.shape)
    # closed-form intrinsics according to zhangs method
    vc = (b[1]*b[3] - b[0]*b[4])/(b[0]*b[2] - b[1]**2)
    l = b[5] - (b[3]**2 + vc*(b[1]*b[2] - b[0]*b[4]))/b[0]
    alpha = np.sqrt((l/b[0]))
    beta = np.sqrt(((l*b[0])/(b[0]*b[2] - b[1]**2)))
    gamma = -1*((b[1])*(alpha**2) *(beta/l))
    uc = (gamma*vc/beta) - (b[3]*(alpha**2)/l)
    print([vc,
    l,
    alpha,
    beta,
    gamma,
    uc])
    A = np.array([
        [alpha, gamma, uc],
        [0, beta, vc],
        [0, 0, 1.0],
    ])
    print("Intrinsic Camera Matrix is :")
    print(A)
    return A
""" | |
# ╔═╡ f00f6dc1-3b39-4403-8daf-80efd28f0940 | |
# images = get_camera_images() | |
chessboard_correspondences = py"getChessboardCorners"(images=None, visualize = True) | |
# ╔═╡ 1885c4ae-d91c-4b0c-8ad6-4b977833b173 | |
np = pyimport("numpy") | |
# ╔═╡ c6f3eb17-91fb-4a0d-b418-5e1fed4a6853 | |
u1, s1, vh1 = np.linalg.svd(Amat) | |
# ╔═╡ f973d502-b29a-4c3a-b609-1710b199f82c | |
vh1 | |
# ╔═╡ 44c37a05-3d7d-49bd-8e5c-cf01588da34e | |
val_idx =np.argmin(s1) | |
# ╔═╡ 012b465d-60c0-44f3-96f3-563c89d2ac6e | |
# ╔═╡ d7d76f4b-1475-42f1-afb6-c15dee869c47 | |
h_norm = vh1[8] | |
# ╔═╡ 146042f0-73cb-49e7-a7fb-0f5d05a1d018 | |
h_norm1 = h_norm.reshape(3, 3) | |
# ╔═╡ Cell order: | |
# ╠═4934e4de-b03d-419f-a076-9a8116f5ddf5 | |
# ╠═14519106-d4cf-4a77-acca-a22b7c426334 | |
# ╟─dfb7c6be-ee0d-11ea-194e-9758857f7b20 | |
# ╟─1a0324de-ee19-11ea-1d4d-db37f4136ad3 | |
# ╠═11bb0034-d018-4417-9d76-e9509403d542 | |
# ╠═c11e3475-1835-462f-bdc5-c3f2592b1980 | |
# ╠═594acafd-01d4-4eee-b9e6-5b886953b5b1 | |
# ╠═cb0ef50c-1f87-45f3-ad3f-339cc15098c1 | |
# ╟─f5642319-05ee-4731-ad26-80bcd4f6aa7b | |
# ╟─f2236406-af64-403d-84bd-e3afe395b791 | |
# ╠═3687d6ca-5cd2-4e6b-b0b1-7c7bbc791870 | |
# ╠═25d1a770-a2cc-4fcc-8e95-c240b2dff67d | |
# ╠═e2516aef-c97e-40e6-bf14-26dd24fe8e42 | |
# ╠═a092c79d-f568-4942-88cf-060770324cd0 | |
# ╠═0637aecb-0d91-428b-a968-e05677bf83a8 | |
# ╠═63685547-1b8f-4a81-a08b-b4cd037188af | |
# ╠═f0184dae-b562-469e-8263-63a565fea9cb | |
# ╠═9148b86f-62c1-43d8-9b3a-99f293005f59 | |
# ╠═1e4cc89e-585e-4803-8abc-f2cad3832877 | |
# ╠═834d4ec1-5684-4302-b79c-993971ee870d | |
# ╠═6da6035a-4815-42b1-914f-ca8ebe276324 | |
# ╠═ee6d050b-b2b0-4f2e-b611-a9a427e1c6b7 | |
# ╠═f9f91642-5619-4521-a47c-26a628c7e9a5 | |
# ╠═5fb9ad50-9647-4a55-9e4c-ba4246b43b80 | |
# ╠═914393a3-4bf0-4771-b880-9e1939fd8873 | |
# ╠═e6514a59-8e71-471b-be7d-d43554d09dcb | |
# ╟─b258a32c-539f-44b4-babf-6e83a81be703 | |
# ╠═b2c29b9f-45ad-4736-ba49-49011b8bd7d7 | |
# ╠═7a80d3c9-050a-421e-8a24-3a86ded52b3a | |
# ╠═9a14c78a-f90f-4049-8397-cfd228230e1a | |
# ╠═37f2c3e5-21dc-44ec-b780-4389c9d22d2b | |
# ╠═0ec97ce8-ffc6-489e-a635-1e96b630cf8e | |
# ╠═1d4495d7-2fa5-4b45-bbe4-00a5a2bb4d16 | |
# ╠═c0288129-85c4-4ed7-8553-bbb91b655d8f | |
# ╠═a356ec89-ba9c-434c-8fb9-258eb0e3a382 | |
# ╠═15321a8a-0714-4f44-91a4-99a54c29e46f | |
# ╠═a51f6584-b4b6-494a-9456-3d4ef0d74112 | |
# ╠═4e62057e-bf95-4701-805f-63085487e4f1 | |
# ╠═55f91b70-afa8-4af8-bc65-b62a13cf874e | |
# ╠═420b7065-fb2d-494c-8d3f-294dab55e777 | |
# ╠═f00f6dc1-3b39-4403-8daf-80efd28f0940 | |
# ╠═1885c4ae-d91c-4b0c-8ad6-4b977833b173 | |
# ╠═c6f3eb17-91fb-4a0d-b418-5e1fed4a6853 | |
# ╠═f973d502-b29a-4c3a-b609-1710b199f82c | |
# ╠═44c37a05-3d7d-49bd-8e5c-cf01588da34e | |
# ╠═012b465d-60c0-44f3-96f3-563c89d2ac6e | |
# ╠═d7d76f4b-1475-42f1-afb6-c15dee869c47 | |
# ╠═146042f0-73cb-49e7-a7fb-0f5d05a1d018 |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
### A Pluto.jl notebook ### | |
# v0.19.27 | |
using Markdown | |
using InteractiveUtils | |
# ╔═╡ 907ea08e-2d52-11ee-0218-67180aaaba6e | |
# Activate the local project environment so the notebook resolves packages
# against ./Project.toml.
begin
	using Pkg
	Pkg.activate(".")
end
# ╔═╡ 0273fe28-3cf4-4d92-96ed-b4b81b789e8c | |
using PyCall, Statistics, LinearAlgebra | |
# ╔═╡ c204a489-151d-4c37-8255-22803946a1c5 | |
using .Iterators | |
# ╔═╡ e564e588-4336-4069-98bb-fb5cfa78a8f5 | |
using LsqFit | |
# ╔═╡ 933ec515-4bbe-4208-a496-221edc71b232 | |
# Run the OpenCV-based chessboard detector defined in python_calib.py (via
# PyCall) and bring the per-view (image points, object points) pairs into Julia.
begin
	@pyinclude("python_calib.py")
	chessboard_correspondences = py"getChessboardCorners"()
end
# ╔═╡ bed0b3f8-83e6-4d7c-b54f-b9be0b5f71b9 | |
# Unpack the first view's pair. NOTE(review): the Python side appends pairs as
# [image_points, object_points], so `world_cords` receives PIXEL coordinates
# and `image_cords` the board coordinates — the names look swapped; confirm.
world_cords, image_cords = chessboard_correspondences[1,:]
# ╔═╡ 6b057b4c-7d75-48bd-9f9d-96d827618755 | |
begin
	"""
	    get_normalization_matrix(pts, name="A")

	Build the Hartley-style conditioning transform for a set of 2-D points:
	translate to zero mean and scale so each axis has variance 2.
	Returns `(n, n_inv)` — the forward 3×3 transform and its inverse.
	"""
	function get_normalization_matrix(pts, name="A")
		pts = Float64.(pts)
		x_mean, y_mean = mean(pts, dims=1)
		# corrected=false matches numpy's population variance.
		var_x, var_y = var(pts, dims=1; corrected=false)
		s_x, s_y = sqrt(2/var_x), sqrt(2/var_y)
		n = [s_x 0 -s_x*x_mean; 0 s_y -s_y*y_mean; 0 0 1]
		n_inv = [(1/s_x) 0 x_mean; 0 (1/s_y) y_mean; 0 0 1]
		return Float64.(n), Float64.(n_inv)
	end

	"""
	    normalize_points(cords)

	For every view in `cords` (a `views × 2` matrix whose rows are
	`(image_points, object_points)` pairs), normalize both point sets and
	return a vector of tuples
	`(imp, objp, normalized_imp, normalized_objp, N_u, N_x, N_u_inv, N_x_inv)`,
	where the normalized point sets are homogeneous `N × 3` matrices.
	"""
	function normalize_points(cords)
		views = size(cords)[1]
		ret_correspondences = []
		for i in 1:views
			# BUG FIX: read from the `cords` argument, not the global
			# `chessboard_correspondences`.
			imp, objp = cords[i, :]
			N_x, N_x_inv = get_normalization_matrix(objp, "A")
			N_u, N_u_inv = get_normalization_matrix(imp, "B")
			# Homogeneous coordinates, sized from the data instead of a
			# hard-coded 54 points.
			val = ones(Float64, size(imp, 1), 1)
			normalized_hom_imp = hcat(imp, val)
			normalized_hom_objp = hcat(objp, val)
			for j in 1:size(normalized_hom_objp)[1]
				n_o = N_x * normalized_hom_objp[j, :]
				normalized_hom_objp[j, :] = n_o/n_o[end]
				n_u = N_u * normalized_hom_imp[j, :]
				normalized_hom_imp[j, :] = n_u/n_u[end]
			end
			push!(ret_correspondences, (imp, objp, normalized_hom_imp, normalized_hom_objp, N_u, N_x, N_u_inv, N_x_inv))
		end
		return ret_correspondences
	end
end
# ╔═╡ 0e990a5f-c9a4-4326-8aec-5f5147507a6b | |
chessboard_correspondences_normalized = normalize_points(chessboard_correspondences) | |
# ╔═╡ edbd55cf-8f54-4184-a7f2-5c805c98bdee | |
"""
    compute_view_based_homography(correspondence; reproj = 0)

Estimate the 3×3 homography mapping object-plane points to image points for
one view via the DLT: stack two linear constraints per normalized
correspondence, take the right singular vector of the smallest singular
value, de-normalize, and rescale so `h[3, 3] == 1`.

`correspondence = (imp, objp, normalized_imp, normalized_objp, N_u, N_x, N_u_inv, N_x_inv)`.
The `reproj` keyword is accepted for interface compatibility but unused.
"""
function compute_view_based_homography(correspondence; reproj = 0)
	normalized_image_points = correspondence[3]
	normalized_object_points = correspondence[4]
	N_x = correspondence[6]
	N_u_inv = correspondence[7]
	N = size(correspondence[1])[1]
	# Two DLT rows per correspondence. BUG FIX: iterate over the actual
	# number of points instead of the hard-coded 54.
	rows = Vector{Matrix{Float64}}()
	for i in 1:N
		X, Y = normalized_object_points[i, 1], normalized_object_points[i, 2]
		u, v = normalized_image_points[i, 1], normalized_image_points[i, 2]
		push!(rows, [-X -Y -1 0 0 0 X*u Y*u u; 0 0 0 -X -Y -1 X*v Y*v v])
	end
	Amat = vcat(rows...)
	# Solve Amat * h = 0: h is the right singular vector belonging to the
	# smallest singular value.
	u, s, vh = svd(Amat)
	h_norm = vh[:, findmin(s)[2]]
	# reshape is column-major; the adjoint restores the row-major layout.
	h_norm = reshape(h_norm, (3, 3))'
	# De-normalize and fix the overall scale.
	h = (N_u_inv * h_norm) * N_x
	h = h ./ h[3, 3]
	return h
end
# ╔═╡ bdff8656-8b51-4a33-912f-3b6128d757ed | |
begin
	# Initial DLT homography for every view.
	H = []
	foreach(chessboard_correspondences_normalized) do corr
		push!(H, compute_view_based_homography(corr; reproj = 0))
	end
	H
end
# ╔═╡ e5ffc1b5-f5ef-45b1-8387-8610825151e5 | |
begin
	# Nonlinear (Levenberg–Marquardt) refinement of a view homography using
	# LsqFit. Parameter layout: the 9-vector `h` is the ROW-major flattening
	# of the 3×3 homography (built below via `flatten(H')`), i.e. h[1:3] is
	# the first row.

	# Model for `curve_fit`: project the flattened point list X
	# (x1, y1, x2, y2, …) through homography parameters h and return the
	# flattened projected coordinates (u1, v1, u2, v2, …).
	function model(X, h)
		# @show X length(X)
		# @show h
		N = trunc(Int, length(X) / 2)
		# Rows of x_j are the individual 2-D points.
		x_j = reshape(X, (2, N))'
		# @info "x_j:" x_j
		projected = zeros(2*N)
		for j in 1:N
			x, y = x_j[j,:]
			# Perspective division by the third homogeneous coordinate.
			w = h[7]*x + h[8]*y + h[9]
			projected[(2*j) - 1] = (h[1] * x + h[2] * y + h[3]) / w
			projected[2*j] = (h[4] * x + h[5] * y + h[6]) / w
		end
		# @info "Projected:" projected length(projected)
		return projected
	end

	# Analytic Jacobian of `model` w.r.t. the nine homography parameters;
	# two rows (du/dh, dv/dh) per point.
	function jac_function(X, h)
		N = trunc(Int, length(X) /2)
		# @show N
		x_j = reshape(X , (2, N))'
		jacobian = zeros(Float64, (2*N, 9))
		for j in 1:N
			x, y = x_j[j,:]
			sx = Float64(h[1]*x + h[2]*y + h[3])
			sy = Float64(h[4]*x + h[5]*y + h[6])
			w = Float64(h[7]*x + h[8]*y + h[9])
			jacobian[(2*j) - 1,:] = [x/w, y/w, 1/w, 0, 0, 0, -sx*x/w^2, -sx*y/w^2, -sx/w^2]
			jacobian[2*j,:] = [0, 0, 0, x/w, y/w, 1/w, -sy*x/w^2, -sy*y/w^2, -sy/w^2]
		end
		# @info "Jacobian:" jacobian length(jacobian)
		return jacobian
	end

	# Refine homography H against one view's correspondence tuple
	# (imp, objp, normalized_imp, normalized_objp, N_u, N_x, N_u_inv, N_x_inv).
	# Fits on the RAW (un-normalized) points; returns H scaled so H[3,3] == 1.
	function refine_homographies(H, correspondence; skip=false)
		if skip
			return H
		end
		image_points = correspondence[1]
		object_points = correspondence[2]
		normalized_image_points = correspondence[3]
		normalized_object_points = correspondence[4]
		# N_u = correspondence[5]
		N_x = correspondence[6]
		N_u_inv = correspondence[7]
		N_x_inv = correspondence[8]
		N = size(normalized_object_points)[1]
		# Row-major flattening of points and parameters (hence the adjoints).
		X = Float64.(collect(flatten(object_points')))
		Y = Float64.(collect(flatten(image_points')))
		h = collect(flatten(H'))
		# @show h
		# @show det(H)
		# @info "data:" X Y h
		fit = curve_fit(model, jac_function, Float64.(X), Float64.(Y), h;)
		if fit.converged
			# NOTE(review): reshape is column-major, so this yields the
			# TRANSPOSE of the row-major parameter layout; callers compensate
			# by pushing `h_opt'` afterwards — confirm before reusing elsewhere.
			H = reshape(fit.param, (3, 3))
		end
		H = H/H[3, 3]
		return H
	end
end
# ╔═╡ 1f668b5b-22aa-43d8-9293-fcf9c2bed5fe | |
begin
	# LM-refine every initial homography; the adjoint undoes the column-major
	# reshape performed inside refine_homographies.
	H_r = []
	for (h_init, corr) in zip(H, chessboard_correspondences_normalized)
		h_opt = refine_homographies(h_init, corr; skip=false)
		push!(H_r, h_opt')
	end
	H_r
end
# ╔═╡ fe2c2081-67ab-4a6d-8d88-c81c42f3833a | |
"""
    get_intrinsic_parameters(H_r)

Recover the camera intrinsics from a collection of view homographies using
Zhang's closed-form method. Returns `(A, b)` where `A` is the 3×3 intrinsic
matrix and `b` the solved conic vector.
"""
function get_intrinsic_parameters(H_r)
	M = length(H_r)

	# One constraint row v_pq per Zhang's formulation.
	v_pq(p, q, H) = [
		H[1, p]*H[1, q]
		(H[1, p]*H[2, q] + H[2, p]*H[1, q])
		H[2, p]*H[2, q]
		(H[3, p]*H[1, q] + H[1, p]*H[3, q])
		(H[3, p]*H[2, q] + H[2, p]*H[3, q])
		H[3, p]*H[3, q]
	]

	V = zeros(Float64, 2*M, 6)
	for (i, H) in enumerate(H_r)
		V[2i - 1, :] = v_pq(1, 2, H)
		V[2i, :] = v_pq(1, 1, H) .- v_pq(2, 2, H)
	end

	# solve V.b = 0: b is the right singular vector belonging to the smallest
	# singular value.
	u, s, vh = svd(V)
	b = vh[:, findmin(s)[2]]
	@info size(u) size(s) size(vh)
	print("V.b = 0 Solution : ", b)

	# Closed-form extraction of the intrinsics (Zhang, Appendix B).
	vc = (b[2]*b[4] - b[1]*b[5])/(b[1]*b[3] - b[2]^2)
	l = b[6] - (b[4]^2 + vc*(b[2]*b[3] - b[1]*b[5]))/b[1]
	alpha = sqrt(l/b[1])
	beta = sqrt((l*b[1])/(b[1]*b[3] - b[2]^2))
	gamma = -1*((b[2])*(alpha^2)*(beta/l))
	uc = (gamma*vc/beta) - (b[4]*(alpha^2)/l)

	A = [alpha gamma uc;
	     0 beta vc;
	     0 0 1.0]
	return A, b
end
# ╔═╡ 5c2a2f80-6884-4d72-914a-973a909c5a22
# Zhang's method: recover intrinsics A and conic vector b from the refined
# homographies.
res = get_intrinsic_parameters(H_r)

# ╔═╡ 15bf4e50-3205-4663-bd85-d103e8611d87
# b = [B11, B12, B22, B13, B23, B33] parametrizes the image of the absolute conic.
b = res[2]

# ╔═╡ f235bd77-0cd0-4afb-a1ec-8572f3c01bba
# Symmetric conic matrix B assembled from b.
B = [b[1] b[2] b[4];b[2] b[3] b[5];b[4] b[5] b[6]]

# ╔═╡ d1f50fcd-2be0-4d11-a433-137cddae14dd
# Intrinsic camera matrix.
A = res[1]
# ╔═╡ fa99ade6-1d2e-4dfb-88bd-a5a0625cf181 | |
# λ = 320 | |
# ╔═╡ b94d8141-9cb7-413b-85b2-7cef4396a082 | |
# r1 = λ * inv(A) * H_r[1][:,1] | |
# ╔═╡ 407b7789-c370-49a1-9aab-cf797d9a3176 | |
# r2 = λ * inv(A) * H_r[1][:,2] | |
# ╔═╡ cf3a254a-fd66-420d-a72e-59a0f48e4991 | |
# r3 = r1 .* r2 | |
# ╔═╡ 567824cb-5c9e-41b8-8d7e-ab0ac2929b86 | |
# t = λ * inv(A) * H_r[1][:,3] | |
# ╔═╡ bfe49eec-b3f1-42e5-95d6-b6d01ff83ed4 | |
# rt = [r1 r2 r3 t] | |
# ╔═╡ 9203a69e-023f-4057-bf21-f7c007919f06 | |
# ╔═╡ 7a0dbc2a-d429-4453-aa8f-4e7985c115ec | |
# Sanity check on the recovered intrinsics: det(inv(A) * H) depends on the
# extrinsic scale of H — NOTE(review): ad-hoc diagnostic, value is not
# normalized; confirm intended interpretation.
1 / det(inv(A) * H_r[1] )

# ╔═╡ e05ad647-570f-42fd-b415-cdcd03cbaec9
1 / det(inv(A) * H_r[2] )
# ╔═╡ 72944130-7ed8-4873-9ca5-ba983f14d8df | |
# 13×2 Matrix{Matrix{Int32}}: | |
# [244 94; 274 92; … ; 475 264; 510 266] [0 0; 1 0; … ; 7 5; 8 5] | |
# [256 357; 255 334; … ; 523 181; 539 131] [0 0; 1 0; … ; 7 5; 8 5] | |
# [277 72; 313 81; … ; 497 374; 544 390] [0 0; 1 0; … ; 7 5; 8 5] | |
# [188 130; 223 127; … ; 475 338; 521 338] [0 0; 1 0; … ; 7 5; 8 5] | |
# [435 50; 450 78; … ; 279 378; 288 431] [0 0; 1 0; … ; 7 5; 8 5] | |
# [589 138; 586 175; … ; 393 357; 389 387] [0 0; 1 0; … ; 7 5; 8 5] | |
# [368 137; 358 169; … ; 159 306; 151 334] [0 0; 1 0; … ; 7 5; 8 5] | |
# [470 92; 465 126; … ; 197 328; 184 370] [0 0; 1 0; … ; 7 5; 8 5] | |
# [219 85; 263 93; … ; 441 313; 469 313] [0 0; 1 0; … ; 7 5; 8 5] | |
# [413 65; 419 103; … ; 293 389; 301 429] [0 0; 1 0; … ; 7 5; 8 5] | |
# [423 71; 426 103; … ; 201 360; 198 408] [0 0; 1 0; … ; 7 5; 8 5] | |
# [402 71; 414 113; … ; 300 350; 311 374] [0 0; 1 0; … ; 7 5; 8 5] | |
# [415 57; 422 97; … ; 271 387; 279 422] [0 0; 1 0; … ; 7 5; 8 5] | |
# ╔═╡ 91e78452-4a87-4d20-aa98-c4ae9310524d | |
begin
	img_num = 2
	# Scale λ of H = λ·A·[r1 r2 t]: estimated from the two rotation columns,
	# whose true norm is 1 (averaged over both for robustness).
	λ = mean([1 / norm(inv(A) * H_r[img_num][:,1]), 1 / norm(inv(A) * H_r[img_num][:,2])])
	r1 = λ * inv(A) * H_r[img_num][:,1]
	r2 = λ * inv(A) * H_r[img_num][:,2]
	# BUG FIX: the third rotation column is the CROSS product r1 × r2,
	# not the element-wise product r1 .* r2.
	r3 = cross(r1, r2)
	t = λ * inv(A) * H_r[img_num][:,3]
	rt = [r1 r2 r3 t]
	# Project the board corner (7, 5, 0) into the image; should match the
	# detected pixel for this view (world z = 0, so r3 is not exercised here).
	projected = A * rt * [7, 5, 0, 1]
	projected = projected ./ projected[3]
end
# ╔═╡ 3e1bcc10-79f4-45eb-85bd-2438040524ab | |
# Reproject the board origin (0, 0, 0); the ad-hoc division by 320 appears to
# be a leftover manual scale check — NOTE(review): confirm intent.
A * rt * [0, 0, 0, 1] / 320
# ╔═╡ Cell order: | |
# ╠═907ea08e-2d52-11ee-0218-67180aaaba6e | |
# ╠═0273fe28-3cf4-4d92-96ed-b4b81b789e8c | |
# ╠═c204a489-151d-4c37-8255-22803946a1c5 | |
# ╠═e564e588-4336-4069-98bb-fb5cfa78a8f5 | |
# ╠═933ec515-4bbe-4208-a496-221edc71b232 | |
# ╠═bed0b3f8-83e6-4d7c-b54f-b9be0b5f71b9 | |
# ╠═6b057b4c-7d75-48bd-9f9d-96d827618755 | |
# ╠═0e990a5f-c9a4-4326-8aec-5f5147507a6b | |
# ╟─edbd55cf-8f54-4184-a7f2-5c805c98bdee | |
# ╠═bdff8656-8b51-4a33-912f-3b6128d757ed | |
# ╠═e5ffc1b5-f5ef-45b1-8387-8610825151e5 | |
# ╠═1f668b5b-22aa-43d8-9293-fcf9c2bed5fe | |
# ╠═fe2c2081-67ab-4a6d-8d88-c81c42f3833a | |
# ╠═5c2a2f80-6884-4d72-914a-973a909c5a22 | |
# ╠═15bf4e50-3205-4663-bd85-d103e8611d87 | |
# ╠═f235bd77-0cd0-4afb-a1ec-8572f3c01bba | |
# ╠═d1f50fcd-2be0-4d11-a433-137cddae14dd | |
# ╠═fa99ade6-1d2e-4dfb-88bd-a5a0625cf181 | |
# ╠═b94d8141-9cb7-413b-85b2-7cef4396a082 | |
# ╠═407b7789-c370-49a1-9aab-cf797d9a3176 | |
# ╠═cf3a254a-fd66-420d-a72e-59a0f48e4991 | |
# ╠═567824cb-5c9e-41b8-8d7e-ab0ac2929b86 | |
# ╠═bfe49eec-b3f1-42e5-95d6-b6d01ff83ed4 | |
# ╠═9203a69e-023f-4057-bf21-f7c007919f06 | |
# ╠═7a0dbc2a-d429-4453-aa8f-4e7985c115ec | |
# ╠═e05ad647-570f-42fd-b415-cdcd03cbaec9 | |
# ╠═3e1bcc10-79f4-45eb-85bd-2438040524ab | |
# ╠═72944130-7ed8-4873-9ca5-ba983f14d8df | |
# ╠═91e78452-4a87-4d20-aa98-c4ae9310524d |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
[deps] | |
Cairo = "159f3aea-2a34-519c-b102-8c37f9878175" | |
CoordinateTransformations = "150eb455-5306-5404-9cee-2592286d6298" | |
FileIO = "5789e2e9-d7fb-5bc7-8068-2c6fae9b9549" | |
ImageDraw = "4381153b-2b60-58ae-a1ba-fd683676385f" | |
Images = "916415d5-f1e6-5110-898d-aaa5f9f070e0" | |
LazySets = "b4f0291d-fe17-52bc-9479-3d1a343d9043" | |
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" | |
Luxor = "ae8d54c2-7ccd-5906-9d76-62fc9837b5bc" | |
Pluto = "c3e4b0f8-55cb-11ea-2926-15256bba5781" | |
PyCall = "438e738f-606a-5dbb-bf0a-cddfbfd45ab0" | |
RawArray = "d3d335b2-f152-507c-820e-958e337efb65" | |
StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" | |
Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
from __future__ import print_function, division | |
import os | |
import glob | |
import sys, argparse | |
import pprint | |
import numpy as np | |
import cv2 | |
from scipy import optimize as opt | |
np.set_printoptions(suppress=True)  # print floats without scientific notation
puts = pprint.pprint  # short alias for pretty-printing

DATA_DIR = "data/"  # directory holding the calibration .jpg images
DEBUG_DIR = "data/"  # where annotated debug images are written
PATTERN_SIZE = (9, 6)  # inner-corner grid of the chessboard (9 x 6 = 54 points)
SQUARE_SIZE = 1.0  # chessboard square edge length, in world units
def show_image(string, image):
    # Display `image` in a window titled `string` and block until a key press.
    cv2.imshow(string, image)
    cv2.waitKey()
def get_camera_images():
    """Yield (path, grayscale image) pairs for every .jpg under DATA_DIR, sorted by path."""
    for path in sorted(glob.glob(DATA_DIR + "*.jpg")):
        yield (path, cv2.imread(path, 0))
def getChessboardCorners(images = None, visualize=False):
    """Detect chessboard corners in every calibration image under DATA_DIR.

    Returns a list of [image_points, object_points] pairs (both int arrays of
    shape (54, 2)); the world Z coordinate is dropped because the board is
    planar. The `images` argument is unused (kept for interface
    compatibility); frames are read via get_camera_images().
    """
    # Planar world model of the 9x6 inner-corner grid, scaled by SQUARE_SIZE.
    objp = np.zeros((PATTERN_SIZE[1]*PATTERN_SIZE[0], 3), dtype=np.float64)
    objp[:, :2] = np.indices(PATTERN_SIZE).T.reshape(-1, 2)
    objp *= SQUARE_SIZE

    image_points = []
    object_points = []
    correspondences = []
    ctr = 0
    for (path, each) in get_camera_images():
        print("Processing Image : ", path)
        ret, corners = cv2.findChessboardCorners(each, patternSize=PATTERN_SIZE)
        if ret:
            print ("Chessboard Detected ")
            corners = corners.reshape(-1, 2)
            if corners.shape[0] == objp.shape[0]:
                image_points.append(corners)
                # Append only World_X, World_Y: World_Z is zero on the board plane.
                object_points.append(objp[:, :-1])
                assert corners.shape == objp[:, :-1].shape, "mismatch shape corners and objp[:,:-1]"
                correspondences.append([corners.astype(int), objp[:, :-1].astype(int)])
            if visualize:
                # Draw the detected corners and save an annotated copy for inspection.
                ec = cv2.cvtColor(each, cv2.COLOR_GRAY2BGR)
                cv2.drawChessboardCorners(ec, PATTERN_SIZE, corners, ret)
                cv2.imwrite(DEBUG_DIR + str(ctr) + ".png", ec)
        else:
            print ("Error in detection points", ctr)
        # BUG FIX: ctr previously advanced only on failed detections, so every
        # debug image was overwritten at "0.png"; count every processed frame.
        ctr += 1
    return correspondences
def normalize_points(chessboard_correspondences):
    """Hartley-style conditioning of image/object points, per view.

    For each view, build a similarity transform that moves the points to zero
    mean and variance 2 per axis; return, per view, the raw points, the
    normalized points, and the forward/inverse normalization matrices.
    """
    views = len(chessboard_correspondences)

    def get_normalization_matrix(pts, name="A"):
        # Zero-mean, variance-2 conditioning matrix and its inverse.
        pts = pts.astype(np.float64)
        x_mean, y_mean = np.mean(pts, axis=0)
        var_x, var_y = np.var(pts, axis=0)
        s_x, s_y = np.sqrt(2/var_x), np.sqrt(2/var_y)
        print("Matrix: {4} : meanx {0}, meany {1}, varx {2}, vary {3}, sx {5}, sy {6} ".format(x_mean, y_mean, var_x, var_y, name, s_x, s_y))
        fwd = np.array([[s_x, 0, -s_x*x_mean], [0, s_y, -s_y*y_mean], [0, 0, 1]])
        inv = np.array([[1./s_x, 0, x_mean], [0, 1./s_y, y_mean], [0, 0, 1]])
        return fwd.astype(np.float64), inv.astype(np.float64)

    ret_correspondences = []
    for view_idx in range(views):
        imp, objp = chessboard_correspondences[view_idx]
        N_x, N_x_inv = get_normalization_matrix(objp, "A")
        N_u, N_u_inv = get_normalization_matrix(imp, "B")
        # Homogeneous column vectors: one (3, 1) block per point.
        hom_imp = np.array([[[pt[0]], [pt[1]], [1.0]] for pt in imp])
        hom_objp = np.array([[[pt[0]], [pt[1]], [1.0]] for pt in objp])
        for k in range(hom_objp.shape[0]):
            n_o = np.matmul(N_x, hom_objp[k])
            hom_objp[k] = n_o/n_o[-1]
            n_u = np.matmul(N_u, hom_imp[k])
            hom_imp[k] = n_u/n_u[-1]
        # Flatten back to (N, 2) inhomogeneous coordinates.
        normalized_objp = hom_objp.reshape(hom_objp.shape[0], hom_objp.shape[1])[:, :-1]
        normalized_imp = hom_imp.reshape(hom_imp.shape[0], hom_imp.shape[1])[:, :-1]
        ret_correspondences.append((imp, objp, normalized_imp, normalized_objp, N_u, N_x, N_u_inv, N_x_inv))
    return ret_correspondences
def compute_view_based_homography(correspondence, reproj = False):
    """
    correspondence = (imp, objp, normalized_imp, normalized_objp, N_u, N_x, N_u_inv, N_x_inv)

    Estimate the 3x3 homography mapping object-plane points to image points
    for one view: DLT on the normalized correspondences via SVD, then
    de-normalization and rescaling so that h[2, 2] == 1. When `reproj` is
    truthy, also print a reprojection-error summary.
    """
    (image_points, object_points,
     normalized_image_points, normalized_object_points,
     N_u, N_x, N_u_inv, N_x_inv) = correspondence

    N = len(image_points)
    print("Number of points in current view : ", N)
    M = np.zeros((2*N, 9), dtype=np.float64)
    print("Shape of Matrix M : ", M.shape)
    print("N_model\n", N_x)
    print("N_observed\n", N_u)

    # Two DLT constraint rows per correspondence.
    for idx in range(N):
        X, Y = normalized_object_points[idx]
        u, v = normalized_image_points[idx]
        M[2*idx] = np.array([-X, -Y, -1, 0, 0, 0, X*u, Y*u, u])
        M[(2*idx) + 1] = np.array([0, 0, 0, -X, -Y, -1, X*v, Y*v, v])
        print ("p_model {0} \t p_obs {1}".format((X, Y), (u, v)))

    # M.h = 0: h is the right singular vector of the smallest singular value.
    u, s, vh = np.linalg.svd(M)
    print("Computing SVD of M")
    h_norm = vh[np.argmin(s)].reshape(3, 3)
    print(N_u_inv)
    print(N_x)
    # De-normalize and fix the overall scale.
    h = np.matmul(np.matmul(N_u_inv, h_norm), N_x)
    h = h[:, :]/h[2, 2]
    print("Homography for View : \n", h )

    if reproj:
        reproj_error = 0
        for idx in range(len(image_points)):
            src = np.array([[object_points[idx][0]], [object_points[idx][1]], [1.0]])
            t = np.matmul(h, src).reshape(1, 3)
            t = t/t[0][-1]
            formatstring = "Imp {0} | ObjP {1} | Tx {2}".format(image_points[idx], object_points[idx], t)
            print(formatstring)
            reproj_error += np.sum(np.abs(image_points[idx] - t[0][:-1]))
        reproj_error = np.sqrt(reproj_error/N)/100.0
        print("Reprojection error : ", reproj_error)
    return h
def minimizer_func(initial_guess, X, Y, h, N):
    """Residual vector for LM homography refinement.

    scipy calls this as fun(params, X, Y, h, N), so `initial_guess` holds the
    CURRENT parameter vector, not the starting point.

    X : flattened object points (x1, y1, x2, y2, ...), length 2N
    Y : flattened observed image points, length 2N
    h : starting homography estimate (unused; kept for interface compatibility)
    N : number of points

    Returns the signed residuals projected(X; params) - Y, consistent with
    jac_function below.
    """
    # BUG FIX: project through the CURRENT parameters (first argument); the
    # old code used the fixed `h` from args, so the residual never changed
    # during optimization.
    params = initial_guess
    x_j = X.reshape(N, 2)
    projected = np.zeros(2*N, dtype=np.float64)
    for j in range(N):
        x, y = x_j[j]
        w = params[6]*x + params[7]*y + params[8]
        projected[2*j] = (params[0] * x + params[1] * y + params[2]) / w
        projected[2*j + 1] = (params[3] * x + params[4] * y + params[5]) / w
    # BUG FIX: return the raw residuals, not |residual|**2 — least_squares
    # squares them itself, and the Jacobian below is that of the projection.
    return projected - Y


def jac_function(initial_guess, X, Y, h, N):
    """Jacobian of the projection residuals w.r.t. the homography parameters.

    Called by scipy as jac(params, X, Y, h, N); `initial_guess` holds the
    CURRENT parameter vector (see minimizer_func). Two rows per point:
    d(u)/d(params) and d(v)/d(params).
    """
    # BUG FIX: differentiate at the CURRENT parameters, not the fixed `h`.
    params = initial_guess
    x_j = X.reshape(N, 2)
    jacobian = np.zeros((2*N, 9), np.float64)
    for j in range(N):
        x, y = x_j[j]
        sx = np.float64(params[0]*x + params[1]*y + params[2])
        sy = np.float64(params[3]*x + params[4]*y + params[5])
        w = np.float64(params[6]*x + params[7]*y + params[8])
        jacobian[2*j] = np.array([x/w, y/w, 1/w, 0, 0, 0, -sx*x/w**2, -sx*y/w**2, -sx/w**2])
        jacobian[2*j + 1] = np.array([0, 0, 0, x/w, y/w, 1/w, -sy*x/w**2, -sy*y/w**2, -sy/w**2])
    return jacobian
def refine_homographies(H, correspondences, skip=False):
    """Non-linearly refine a homography with Levenberg-Marquardt.

    H : initial 3x3 homography estimate
    correspondences : one view's tuple
        (imp, objp, normalized_imp, normalized_objp, N_u, N_x, N_u_inv, N_x_inv)
    skip : when True, return H unchanged.

    Returns the refined homography, rescaled so H[2, 2] == 1.
    """
    if skip:
        return H
    # BUG FIX: the body previously read from an undefined name `correspondence`
    # (the parameter is `correspondences`), raising NameError when skip=False.
    image_points = correspondences[0]
    object_points = correspondences[1]
    normalized_image_points = correspondences[2]
    normalized_object_points = correspondences[3]
    N_u = correspondences[4]
    N_x = correspondences[5]
    N_u_inv = correspondences[6]
    N_x_inv = correspondences[7]
    N = normalized_object_points.shape[0]
    # Fit against the raw (un-normalized) correspondences.
    X = object_points.flatten()
    Y = image_points.flatten()
    h = H.flatten()
    h_prime = opt.least_squares(fun=minimizer_func, x0=h, jac=jac_function, method="lm", args=[X, Y, h, N], verbose=0)
    if h_prime.success:
        H = h_prime.x.reshape(3, 3)
    H = H/H[2, 2]
    return H
def get_intrinsic_parameters(H_r):
    """Recover the intrinsic matrix A from >= 3 view homographies (Zhang's method).

    Builds the linear system V.b = 0 on the conic vector b, solves it by SVD,
    then extracts the intrinsics in closed form.
    """
    M = len(H_r)
    V = np.zeros((2*M, 6), np.float64)

    def v_pq(p, q, H):
        # One constraint row v_pq from Zhang's formulation.
        return np.array([
            H[0, p]*H[0, q],
            H[0, p]*H[1, q] + H[1, p]*H[0, q],
            H[1, p]*H[1, q],
            H[2, p]*H[0, q] + H[0, p]*H[2, q],
            H[2, p]*H[1, q] + H[1, p]*H[2, q],
            H[2, p]*H[2, q],
        ])

    for i, H in enumerate(H_r):
        V[2*i] = v_pq(p=0, q=1, H=H)
        V[2*i + 1] = np.subtract(v_pq(p=0, q=0, H=H), v_pq(p=1, q=1, H=H))

    # solve V.b = 0: b is the right singular vector of the smallest singular value.
    u, s, vh = np.linalg.svd(V)
    b = vh[np.argmin(s)]
    print("V.b = 0 Solution : ", b.shape)

    # Closed-form intrinsics from b (Zhang, Appendix B).
    vc = (b[1]*b[3] - b[0]*b[4])/(b[0]*b[2] - b[1]**2)
    l = b[5] - (b[3]**2 + vc*(b[1]*b[2] - b[0]*b[4]))/b[0]
    alpha = np.sqrt((l/b[0]))
    beta = np.sqrt(((l*b[0])/(b[0]*b[2] - b[1]**2)))
    gamma = -1*((b[1])*(alpha**2) *(beta/l))
    uc = (gamma*vc/beta) - (b[3]*(alpha**2)/l)
    print([vc,
           l,
           alpha,
           beta,
           gamma,
           uc])
    A = np.array([
        [alpha, gamma, uc],
        [0, beta, vc],
        [0, 0, 1.0],
    ])
    print("Intrinsic Camera Matrix is :")
    print(A)
    return A
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
https://kushalvyas.github.io/calib.html