Last active
January 18, 2019 18:56
-
-
Save mmuratarat/7c90584910c8db04b3a4ca10752cf1c6 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import matplotlib.pyplot as plt | |
import numpy as np | |
from math import ceil | |
def convolution2d(input, filter, bias=0, strides=(1, 1), padding='SAME'):
    """2-D convolution (cross-correlation) of a single filter over one image.

    Parameters
    ----------
    input : np.ndarray of shape (input_height, input_width, input_depth)
        The image to convolve.
    filter : np.ndarray of shape (filter_height, filter_width, filter_depth)
        A single filter; filter_depth must equal input_depth.
    bias : scalar, optional
        Added to every output element.
    strides : tuple (stride_height, stride_width), optional
    padding : {'SAME', 'VALID'}, optional
        'SAME' zero-pads (TensorFlow-style) so output size is
        ceil(input_size / stride); 'VALID' uses no padding.

    Returns
    -------
    np.ndarray of shape (output_height, output_width)

    Raises
    ------
    ValueError
        If input/filter ranks or depths are inconsistent, or padding is
        neither 'SAME' nor 'VALID'.
    """
    # This is only for using one filter
    if not len(filter.shape) == 3:
        raise ValueError("The size of the filter should be (filter_height, filter_width, filter_depth)")
    if not len(input.shape) == 3:
        raise ValueError("The size of the input should be (input_height, input_width, input_depth)")
    if not filter.shape[2] == input.shape[2]:
        raise ValueError("the input and the filter should have the same depth.")
    input_w, input_h = input.shape[1], input.shape[0]  # input width and input height
    filter_w, filter_h = filter.shape[1], filter.shape[0]  # filter width and filter height

    if padding == 'VALID':
        output_h = int(ceil(float(input_h - filter_h + 1) / float(strides[0])))
        output_w = int(ceil(float(input_w - filter_w + 1) / float(strides[1])))
        image = input  # no padding: slide directly over the input
    elif padding == 'SAME':
        output_h = int(ceil(float(input_h) / float(strides[0])))
        output_w = int(ceil(float(input_w) / float(strides[1])))
        # TensorFlow-style padding amounts: total pad needed per dimension.
        if input_h % strides[0] == 0:
            pad_along_height = max(filter_h - strides[0], 0)
        else:
            pad_along_height = max(filter_h - (input_h % strides[0]), 0)
        if input_w % strides[1] == 0:
            pad_along_width = max(filter_w - strides[1], 0)
        else:
            pad_along_width = max(filter_w - (input_w % strides[1]), 0)
        pad_top = pad_along_height // 2  # extra row goes to the bottom
        pad_left = pad_along_width // 2  # extra column goes to the right
        # Add zero padding to the input image.
        image = np.zeros((input_h + pad_along_height, input_w + pad_along_width, input.shape[2]))
        # BUG FIX: the original wrote image[pad_top:-pad_bottom, pad_left:-pad_right, :],
        # which yields an EMPTY slice when pad_bottom or pad_right is 0 (e.g. a 1x1
        # filter with stride 1) and crashes the assignment. Positive end indices
        # are correct for every padding amount, including zero.
        image[pad_top:pad_top + input_h, pad_left:pad_left + input_w, :] = input
    else:
        # The original fell through both branches and raised NameError on 'output'.
        raise ValueError("padding should be either 'SAME' or 'VALID'.")

    output = np.zeros((output_h, output_w))  # convolution output
    for x in range(output_w):  # Loop over every pixel of the output
        for y in range(output_h):
            # element-wise multiplication of the filter and the image window
            output[y, x] = (filter * image[y * strides[0]:y * strides[0] + filter_h,
                                           x * strides[1]:x * strides[1] + filter_w, :]).sum() + bias
    return output
Sign up for free to join this conversation on GitHub.
Already have an account? Sign in to comment.