Python script that reconstructs torn sections of paper.
import math
import cv2
import numpy as np

# Find contour edges
# Find the edge that is torn
# Use the Hough line transform
# Create a mask image where the lines are white on a black background
# Check whether each contour point is in a white or black region
# Rotate the torn edges
# Measure how much they overlap
# The rotation with the maximum overlap is how they should be aligned
# Align and display the images
# Finds the shards of paper in an image
def get_shards(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)   # Convert to grayscale
    gray = cv2.blur(gray, (2,2))
    canny = cv2.Canny(gray, 30, 150)                # Canny edge detection
    cv2.imshow('canny', canny)
    # Find contours
    contours, _ = cv2.findContours(canny, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # Draw contours on canny (this connects the contours)
    cv2.drawContours(canny, contours, -1, 255, 6)
    # Get mask for floodfill
    h, w = canny.shape[:2]
    mask = np.zeros((h+2, w+2), np.uint8)
    # Floodfill the background from point (0, 0) with gray (123)
    cv2.floodFill(canny, mask, (0,0), 123)
    cv2.imshow('ff', canny)
    cv2.imwrite('/home/stephen/Desktop/with.png', canny)
    # Get an image that is only the gray floodfilled area
    bgr = cv2.cvtColor(canny, cv2.COLOR_GRAY2BGR)
    lower, upper = np.array([122,122,122]), np.array([124,124,124])
    # Threshold the image to keep only the flood-filled (value 123) pixels
    mask = cv2.inRange(bgr, lower, upper)
    # Bitwise-AND the mask and the image
    res = cv2.bitwise_and(bgr, bgr, mask=mask)
    gray = 255 - cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(gray, 220, 255, cv2.THRESH_BINARY_INV)
    contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    res = np.zeros_like(res)
    # Create a list for unconnected contours
    unconnectedContours = []
    for contour in contours:
        area = cv2.contourArea(contour)
        # Keep contours that are neither very small nor nearly the whole image
        if area > 987 and area < img.shape[0]*img.shape[1] - 9000:
            cv2.drawContours(res, [contour], 0, (255,255,255), cv2.FILLED)
            unconnectedContours.append(contour)
    # Show the result and return the shard image and the first shard contour
    cv2.imshow('res', res)
    print(len(unconnectedContours), largest_contour(contours))
    cv2.waitKey()
    return res, unconnectedContours[0]
# Distance between two points
def distance(a, b): return math.sqrt((a[0]-b[0])**2 + (a[1]-b[1])**2)

# Returns the first point of the largest contour (by area)
def largest_contour(contours):
    c = max(contours, key=cv2.contourArea)
    return c[0]
# Draws a contour without connecting contour[0] to contour[-1]
def draw_contour(img, contour, color, thick):
    for idx in range(len(contour)-1):
        a, b = contour[idx], contour[idx+1]
        a, b = tuple(a[0]), tuple(b[0])
        # Skip very long segments so distant points are not joined
        if distance(a, b) < 321: cv2.line(img, a, b, color, thick)
    return img
# Finds the straight lines in an image with the Hough transform
# and returns a mask with the lines drawn in white on black
def lines_mask(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    dst = cv2.Canny(gray, 50, 200, None, 3)
    # Create mask image
    mask = np.zeros_like(gray)
    # Find the lines
    lines = cv2.HoughLines(dst, 1, np.pi / 180, 100, None, 0, 0)
    # Draw the lines
    if lines is not None:
        for i in range(0, len(lines)):
            rho = lines[i][0][0]
            theta = lines[i][0][1]
            a = math.cos(theta)
            b = math.sin(theta)
            x0 = a * rho
            y0 = b * rho
            # Extend each line well past the image borders before drawing it
            pt1 = (int(x0 + 1000*(-b)), int(y0 + 1000*(a)))
            pt2 = (int(x0 - 1000*(-b)), int(y0 - 1000*(a)))
            cv2.line(mask, pt1, pt2, 255, 18, cv2.LINE_AA)
    #cv2.imshow('mask', mask)
    return mask
# Takes an image and a contour and returns the torn edge
def find_torn_edge(img, cnt, img_lines):
    # Create temporary images: a thin edge for matching and a thick one for display
    img_h, img_w = img.shape[0], img.shape[1]
    temp = np.zeros((img_h, img_w), np.uint8)
    temp_human = np.zeros((img_h, img_w), np.uint8)
    cv2.drawContours(temp_human, [cnt], 0, 78, cv2.FILLED)
    # A contour point that does not lie on a Hough line belongs to the torn edge
    torn_edge = []
    for i in range(len(cnt)):
        x, y = cnt[i][0]
        if img_lines[y, x] == 0: torn_edge.append((x, y))
    # Draw the torn edge
    for i in range(len(torn_edge)-1):
        a = torn_edge[i]
        b = torn_edge[i+1]
        cv2.line(temp, a, b, 255, 2)
        cv2.line(temp_human, a, b, 255, 14)
    return torn_edge, temp_human, temp
# Rotates the torn-edge mask and returns the contour of the rotated edge
# (the contour, wOverlay, and hOverlay parameters are unused but kept for the call sites)
def rotate_contour(contour, edge_mask, wOverlay, hOverlay, rotation):
    temp = edge_mask.copy()
    rows, cols = temp.shape
    M = cv2.getRotationMatrix2D((cols/2, rows/2), rotation, 1)
    dst = cv2.warpAffine(temp, M, (cols, rows))
    _, thresh = cv2.threshold(dst, 123, 255, cv2.THRESH_BINARY)
    contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    return contours[0]
# Translates a contour so that it sits on top of another contour
def align_translate(left, right):
    right_center, _ = cv2.minEnclosingCircle(right)
    left_center, _ = cv2.minEnclosingCircle(left)
    dx, dy = right_center[0]-left_center[0], right_center[1]-left_center[1]
    # Shift every point of the right contour onto the left contour's center
    for i in range(len(right)):
        right[i][0][0] = right[i][0][0] - dx
        right[i][0][1] = right[i][0][1] - dy
    return left, right, dx, dy
# Draws the output image
def draw_background(best_match, img1, img2, img1_mask, img2_mask, offset):
    print(best_match)
    # Create a background for each of the images
    bgA = np.zeros((2345,2345,3), np.uint8)
    bgB = np.zeros((2345,2345,3), np.uint8)
    # Mask the images so only the paper shards are drawn
    img1 = cv2.bitwise_and(img1, img1, mask=img1_mask)
    img2 = cv2.bitwise_and(img2, img2, mask=img2_mask)
    # Determine the buffer size
    _, rotation, dx, dy = best_match
    rotation, dx, dy = int(rotation), int(dx), int(dy)
    b = int(max(abs(dx), abs(dy))) + 10
    # Draw the first image on the background
    bgA[offset+b:offset+b+img_h, offset+b:offset+b+img_w] = img2
    # Rotate the second image
    M = cv2.getRotationMatrix2D((img_w/2, img_h/2), rotation, 1)
    dst = cv2.warpAffine(img1, M, (img_w, img_h))
    # Translate it and paste it on the background
    bgB[offset+b-dy:offset+b-dy+img_h, offset+b-dx:offset+b-dx+img_w] = dst
    # Combine the backgrounds
    bg = bgA + bgB
    # Crop and resize
    x_vals = b-dx, b-dx+img_w, b, b+img_w
    y_vals = b, b+img_h, b-dy, b-dy+img_h
    bg = bg[min(y_vals):max(y_vals), min(x_vals):max(x_vals)]
    bg = cv2.resize(bg, (987,987))
    return bg
# Read in images
img1 = cv2.imread('/home/stephen/Desktop/paper3.jpg')
img2 = cv2.imread('/home/stephen/Desktop/paper4.jpg')
img_w, img_h = 780, 1040
img1 = cv2.resize(img1, (img_w, img_h))
img2 = cv2.resize(img2, (img_w, img_h))
# Get shards of paper
res1, cnt1 = get_shards(img1)
res2, cnt2 = get_shards(img2)
# Find the lines in the image
img1_lines = lines_mask(res1)
img2_lines = lines_mask(res2)
# Find the torn edges
torn_edge1, temp_human1, edge_mask1 = find_torn_edge(img1, cnt1, img1_lines)
cv2.imshow('img1', temp_human1)
cv2.waitKey()
torn_edge2, temp_human2, edge_mask2 = find_torn_edge(img2, cnt2, img2_lines)
#cv2.imshow('img1', temp_human2)
#cv2.waitKey()
# Plot
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
f, axarr = plt.subplots(2, 4, sharex=True)
axarr[0,0].imshow(img1)
axarr[1,0].imshow(img2)
outline1 = cv2.drawContours(img1.copy(), [cnt1], 0, (0,255,0), 15)
outline2 = cv2.drawContours(img2.copy(), [cnt2], 0, (0,255,0), 15)
img1_mask = np.zeros((img_h, img_w), np.uint8)
img2_mask = np.zeros((img_h, img_w), np.uint8)
img1_mask = cv2.drawContours(img1_mask, [cnt1], 0, 255, cv2.FILLED)
img2_mask = cv2.drawContours(img2_mask, [cnt2], 0, 255, cv2.FILLED)
axarr[0,1].imshow(outline1)
axarr[1,1].imshow(outline2)
axarr[0,2].imshow(img1_lines)
axarr[1,2].imshow(img2_lines)
axarr[0,3].imshow(temp_human1)
axarr[1,3].imshow(temp_human2)
axarr[0,0].set_title('Source Images')
axarr[0,1].set_title('Paper Edges')
axarr[0,2].set_title('Hough Lines')
axarr[0,3].set_title('Torn Edge')
plt.show()
# Write the search animation to a video file
vid_writer = cv2.VideoWriter('/home/stephen/Desktop/re_encode.avi', cv2.VideoWriter_fourcc('M','J','P','G'), 20, (987, 987))
# Rotate the contour through a full revolution and score each angle
left, right = cnt1, cnt2
match, matches, angle, best_match = 0, [-1], 0, (0, 0, 0, 0)
graph = np.zeros((987, 987, 3), np.uint8)
while angle < 380:
    # Copy the images and create temporary images
    img, overlay = img2.copy(), img1.copy()
    tempA = np.zeros((img.shape[0], img.shape[1]), np.uint8)
    tempB = np.zeros((img.shape[0], img.shape[1]), np.uint8)
    # Rotate the contour
    rotatedContour = rotate_contour(torn_edge1, edge_mask1, max(img.shape), max(img.shape), angle)
    # The left contour stays fixed (rotation of 0)
    clean_left = rotate_contour(torn_edge2, edge_mask2, max(img.shape), max(img.shape), 0)
    # Translate the contour
    a, b, dx, dy = align_translate(clean_left, rotatedContour)
    # Draw both contours at intensity 123 and add the masks
    tempA = draw_contour(tempA, b, 123, 3)
    tempB = draw_contour(tempB, a, 123, 3)
    tempC = tempA + tempB
    cv2.imwrite('/home/stephen/Desktop/thresh.png', tempC/1.5)
    # Keep only the pixels where the two contours overlap and count them
    _, thresh = cv2.threshold(tempC, 220, 255, cv2.THRESH_BINARY_INV)
    thresh = 255 - thresh
    match = int(np.sum(thresh))
    matches.append(match)
    # Is this the best match?
    if match >= max(matches): best_match = b, angle, int(dx), int(dy)
    # Make the graph
    p1 = int(angle*2.35), 0
    p2 = int(angle*2.35), int(match/75)
    cv2.line(graph, p1, p2, (0,255,0), 2)
    bg = draw_background((None, angle, dx, dy), img1, img2, img1_mask, img2_mask, 0)
    bg += graph
    img = draw_contour(bg, b, (255,0,255), 2)
    img = draw_contour(bg, a, (255,255,0), 2)
    img = draw_contour(bg, best_match[0], (0,255,255), 4)
    cv2.imshow('bg', bg)
    vid_writer.write(bg)
    k = cv2.waitKey(1)
    if k == 27: break
    angle += 1
vid_writer.release()
cv2.destroyAllWindows()
# Show the user the best match
bg = draw_background(best_match, img1, img2, img1_mask, img2_mask, 0)
cv2.imshow('img', bg)
cv2.imwrite('/home/stephen/Desktop/paperReconstruction.png', bg)
cv2.waitKey()
cv2.destroyAllWindows()