import cv2
import numpy as np
import scipy.sparse as sp
import sys
# Reference (R, G, B) values for the 24 patches of the color checker card
# (see the link at the bottom), listed row by row
reference_colors = [
    [115, 82, 68], [194, 150, 130], [98, 122, 157], [87, 108, 67], [133, 128, 177], [103, 189, 170],
    [214, 126, 44], [80, 91, 166], [193, 90, 99], [94, 60, 108], [157, 188, 64], [224, 163, 46],
    [56, 61, 150], [70, 148, 73], [175, 54, 60], [231, 199, 31], [187, 86, 149], [8, 133, 161],
    [243, 243, 242], [200, 200, 200], [160, 160, 160], [121, 122, 121], [85, 85, 85], [52, 52, 52]]
# load the photo of the color checker (3-channel BGR, as OpenCV reads it)
org_img = cv2.imread(sys.argv[1], cv2.IMREAD_COLOR)
corners = []
def click_for_corners(event, x, y, flags, param):
    # grab references to the global variables
    global corners, org_img
    # if the left mouse button was clicked, record the clicked (x, y) coordinates
    if event == cv2.EVENT_LBUTTONDOWN:
        corners.append((x, y))
    # once the left mouse button is released, redraw the corners picked so far
    elif event == cv2.EVENT_LBUTTONUP:
        img = org_img.copy()
        for x, y in corners:
            cv2.circle(img, (x, y), 3, (0, 255, 0), -1)
        cv2.imshow("input", img)
cv2.namedWindow("input")
cv2.setMouseCallback("input", click_for_corners)
# wait until the four corners of the color checker have been clicked, in the
# order top-left, top-right, bottom-left, bottom-right (matching the homography
# target points below)
while len(corners) < 4:
    img = org_img.copy()
    for x, y in corners:
        cv2.circle(img, (x, y), 3, (0, 255, 0), -1)
    cv2.imshow("input", img)
    cv2.waitKey(1)
# rectify the image: map the clicked corners onto a 600x400 canvas
h, status = cv2.findHomography(np.array(corners), np.array(
    [[0, 0], [600, 0], [0, 400], [600, 400]]))
rectified_img = cv2.warpPerspective(org_img, h, (600, 400))
pattern_size = [6, 4]  # 6 x 4 = 24 patches on the color checker
# get the center region of each patch on the 600x400 rectified card
vx = np.array([600, 0])
vy = np.array([0, 400])
patch_size = 20
extracted_color = []
reference_color = []
points = []
cidx = 0
for i in range(pattern_size[1]):
    for j in range(pattern_size[0]):
        dx = (0.5 + j) * vx / pattern_size[0]
        dy = (0.5 + i) * vy / pattern_size[1]
        loc = (dx + dy).astype(np.int32)
        points.append(loc)
        # get the average color of a small patch around this point
        avg_color = np.zeros(3, dtype=np.float64)
        cnt = 0
        for di in range(-patch_size, patch_size + 1):
            for dj in range(-patch_size, patch_size + 1):
                ploc = loc + np.array([di, dj])
                avg_color += rectified_img[ploc[1], ploc[0], :]
                cnt += 1
        pix = avg_color / cnt
        # OpenCV images are BGR; store the sampled color as RGB
        extracted_color.append([pix[2], pix[1], pix[0]])
        reference_color.append(reference_colors[cidx])
        cidx += 1
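# For reference: the fit below estimates an affine color transform
#     reference = T * extracted + c   (approximately)
# where T is a 3x3 matrix and c a 3-vector offset, i.e. 12 unknowns estimated
# jointly from the 24 patch samples by linear least squares.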
# calibrate with 3x3 transformation matrix plus per-channel offset
nsamples = len(extracted_color)
v = np.array([extracted_color[i] + [1] + extracted_color[i] +
              [1] + extracted_color[i] + [1] for i in range(nsamples)]).ravel()
rows = np.array([[i*3, i*3, i*3, i*3, i*3+1, i*3+1, i*3+1, i*3+1,
                  i*3+2, i*3+2, i*3+2, i*3+2] for i in range(nsamples)]).ravel()
cols = np.array([[0, 1, 2, 9] + [3, 4, 5, 10] + [6, 7, 8, 11]
                 for _ in range(nsamples)]).ravel()
A = sp.coo_matrix((v, (rows, cols)), shape=(nsamples*3, 12), dtype=np.float64)
b = np.array(reference_color).astype(np.float64).reshape(-1, 1)
# solve the normal equations A^T A x = A^T b
# trans = sp.linalg.lsqr(A, b)[0]
trans = np.asarray(np.linalg.solve(A.T.dot(A).todense(), A.T.dot(b)))
print(trans)
t = trans[:9].reshape(3, 3)
c = trans[9:]
# E0: color error before calibration, E1: residual after fitting the transform
print('E0:', np.linalg.norm(np.array(extracted_color, dtype=np.float64) - np.array(reference_color, dtype=np.float64)))
print('E1:', np.linalg.norm(A.dot(trans) - b))
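# A minimal equivalent sketch (not part of the original fit): the same 12
# parameters can be recovered with a dense least-squares solve; M and X below
# are hypothetical names introduced only for this illustration.
#   M = np.hstack([np.array(extracted_color, dtype=np.float64),
#                  np.ones((nsamples, 1))])                       # (nsamples, 4)
#   X, _, _, _ = np.linalg.lstsq(M, np.array(reference_color, dtype=np.float64),
#                                rcond=None)                      # X is (4, 3)
#   # X[:3].T corresponds to the 3x3 matrix t, X[3] to the offset c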
# apply the fitted transform to every pixel; the transform was fitted in RGB,
# so convert to RGB first and back to BGR afterwards
calibrated_img = cv2.cvtColor(org_img, cv2.COLOR_BGR2RGB)
for row in calibrated_img:
    pval = t.dot(row.astype(np.float64).T).T + c.reshape(-1, 3)
    row[:] = np.clip(pval, 0, 255).astype(np.uint8)
calibrated_img = cv2.cvtColor(calibrated_img, cv2.COLOR_RGB2BGR)

calibrated_rectified_img = cv2.cvtColor(rectified_img, cv2.COLOR_BGR2RGB)
for row in calibrated_rectified_img:
    pval = t.dot(row.astype(np.float64).T).T + c.reshape(-1, 3)
    row[:] = np.clip(pval, 0, 255).astype(np.uint8)
calibrated_rectified_img = cv2.cvtColor(calibrated_rectified_img, cv2.COLOR_RGB2BGR)
# overlay the reference colors on the rectified images for visual comparison
for i, p in enumerate(points):
    cv2.rectangle(rectified_img, (p[0] - 10, p[1] - 10), (p[0] + 10, p[1] + 10),
                  (reference_colors[i][2], reference_colors[i][1], reference_colors[i][0]), -1)
cv2.imshow("rectified", rectified_img)
for i, p in enumerate(points):
    cv2.rectangle(calibrated_rectified_img, (p[0] - 10, p[1] - 10), (p[0] + 10, p[1] + 10),
                  (reference_colors[i][2], reference_colors[i][1], reference_colors[i][0]), -1)
cv2.imshow("calibrated_rectified", calibrated_rectified_img)
cv2.imshow("calibrated", calibrated_img)
cv2.waitKey()
# Color checker card: https://en.wikipedia.org/wiki/File:Color_Checker.pdf
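# Usage (a sketch; the script filename is an assumption):
#   python color_calibration.py photo_with_color_checker.jpg
# Click the four corners of the color checker in the "input" window
# (top-left, top-right, bottom-left, bottom-right). The script then shows the
# rectified card, the calibrated photo, and the calibrated rectified card with
# the reference colors overlaid.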