Last active
April 8, 2020 07:24
-
-
Save leimao/ece7217b5d07fe4e685c47af5e76744a to your computer and use it in GitHub Desktop.
This is a simulation for the reorg layer in YOLO v2 model.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import numpy as np
# C-style "macro" constants (named after Darknet's C globals) that size the demo tensors.
stride_MACRO = 2  # reorg stride factor
batch_MACRO = 2  # batch size N
C_MACRO = 4  # input channel count
H_MACRO = 6  # input height
W_MACRO = 6  # input width
"""
Forward(Forward=True): [N,C,H,W] to [N,C//stride//stride,H*stride,W*stride]
Backward(Forward=False): [N,C,H,W] to [N,C*stride*stride,H//stride,W//stride]
"""
def reorg(arrayIn, batch, C, H, W, stride, forward=False):
    """Simulate Darknet's reorg layer on a flattened [N, C, H, W] array.

    forward=True:  [N, C, H, W] -> [N, C//stride**2, H*stride, W*stride]
    forward=False: [N, C, H, W] -> [N, C*stride**2, H//stride, W//stride]

    Returns a new float ndarray of the same length; the caller casts it back.
    The (b, k, j, i) <-> (b, c2, h2, w2) mapping is a bijection, so applying
    the function with the opposite `forward` flag inverts it exactly.
    """
    arrayOut = np.zeros(len(arrayIn))
    out_c = C // (stride * stride)
    # np.ndindex walks (b, k, j, i) in C order — identical to four nested loops.
    for b, k, j, i in np.ndindex(batch, C, H, W):
        src = i + W * (j + H * (k + C * b))
        # Split channel k into a kept channel c2 and a spatial offset.
        c2 = k % out_c
        offset = k // out_c
        w2 = i * stride + offset % stride
        h2 = j * stride + offset // stride
        dst = int(w2 + W * stride * (h2 + H * stride * (c2 + out_c * b)))
        if forward:
            arrayOut[dst] = arrayIn[src]
        else:
            arrayOut[src] = arrayIn[dst]
    return arrayOut
# First backward (channel increase) then forward (channel decrease)
# Mimicking the author's implementation
# Reorganization is reversible
# Reorganization result was not expected by the public
def backward_forward_author():
    """Backward reorg (channel increase) then forward reorg (channel decrease),
    always passing the INPUT tensor shape to reorg — mimicking the Darknet
    author's implementation.

    The round trip is exact (matrix3 == matrix1), although the intermediate
    layout was not the one the public expected.
    """
    stride = stride_MACRO
    input_batch = batch_MACRO
    input_C = C_MACRO
    input_H = H_MACRO
    input_W = W_MACRO
    output_batch = input_batch
    output_C = input_C * stride * stride
    output_H = input_H // stride
    output_W = input_W // stride
    # Parameters for the reorg function: the author's code always uses the input shape.
    reorg_stride = stride_MACRO
    reorg_batch = batch_MACRO
    reorg_C = input_C
    reorg_H = input_H
    reorg_W = input_W
    # A 0..N-1 ramp makes every element's destination easy to trace.
    # np.arange(..., dtype=int) replaces np.array(list(range(...))).astype(np.int):
    # the np.int alias was removed in NumPy 1.24.
    matrix1_linear = np.arange(input_batch * input_C * input_H * input_W, dtype=int)
    matrix1 = np.reshape(matrix1_linear, (input_batch, input_C, input_H, input_W))
    print(matrix1)
    # Reorg (backward: channels increase)
    matrix2_linear = reorg(matrix1_linear, batch=reorg_batch, C=reorg_C, H=reorg_H, W=reorg_W, stride=reorg_stride, forward=False).astype(int)
    matrix2 = np.reshape(matrix2_linear, (output_batch, output_C, output_H, output_W))
    print(matrix2)
    # Reorg back (forward: channels decrease); matrix3 should be identical to matrix1.
    matrix3_linear = reorg(matrix2_linear, batch=reorg_batch, C=reorg_C, H=reorg_H, W=reorg_W, stride=reorg_stride, forward=True).astype(int)
    matrix3 = np.reshape(matrix3_linear, (input_batch, input_C, input_H, input_W))
    print(matrix3)
    assert np.array_equal(matrix1, matrix3)
# First forward (channel decrease) then backward (channel increase)
# Reorganization is reversible
# Reorganization result was expected by the public
def forward_backward_author():
    """Forward reorg (channel decrease) then backward reorg (channel increase),
    always passing the INPUT tensor shape to reorg — mimicking the Darknet
    author's implementation.

    The round trip is exact (matrix3 == matrix1), and in this direction the
    intermediate result matches what the public expected.
    """
    stride = stride_MACRO
    input_batch = batch_MACRO
    input_C = C_MACRO
    input_H = H_MACRO
    input_W = W_MACRO
    output_batch = input_batch
    output_C = input_C // stride // stride
    output_H = input_H * stride
    output_W = input_W * stride
    # Parameters for the reorg function: the author's code always uses the input shape.
    reorg_stride = stride_MACRO
    reorg_batch = batch_MACRO
    reorg_C = input_C
    reorg_H = input_H
    reorg_W = input_W
    # A 0..N-1 ramp makes every element's destination easy to trace.
    # np.arange(..., dtype=int) replaces np.array(list(range(...))).astype(np.int):
    # the np.int alias was removed in NumPy 1.24.
    matrix1_linear = np.arange(input_batch * input_C * input_H * input_W, dtype=int)
    matrix1 = np.reshape(matrix1_linear, (input_batch, input_C, input_H, input_W))
    print(matrix1)
    # Reorg (forward: channels decrease)
    matrix2_linear = reorg(matrix1_linear, batch=reorg_batch, C=reorg_C, H=reorg_H, W=reorg_W, stride=reorg_stride, forward=True).astype(int)
    matrix2 = np.reshape(matrix2_linear, (output_batch, output_C, output_H, output_W))
    print(matrix2)
    # Reorg back (backward: channels increase); matrix3 should be identical to matrix1.
    matrix3_linear = reorg(matrix2_linear, batch=reorg_batch, C=reorg_C, H=reorg_H, W=reorg_W, stride=reorg_stride, forward=False).astype(int)
    matrix3 = np.reshape(matrix3_linear, (input_batch, input_C, input_H, input_W))
    print(matrix3)
    assert np.array_equal(matrix1, matrix3)
# First backward (channel increase) then forward (channel decrease)
# Reorganization is reversible
# Reorganization result was expected by the public
def backward_forward_leimao():
    """Backward reorg (channel increase) then forward reorg (channel decrease),
    passing reorg the LARGER of the two shapes (here the output shape, since
    output_C >= input_C) — leimao's variant.

    The round trip is exact (matrix3 == matrix1) and the intermediate result
    matches what the public expected.
    """
    stride = stride_MACRO
    input_batch = batch_MACRO
    input_C = C_MACRO
    input_H = H_MACRO
    input_W = W_MACRO
    output_batch = input_batch
    output_C = input_C * stride * stride
    output_H = input_H // stride
    output_W = input_W // stride
    # Parameters for the reorg function: use whichever shape has more channels.
    use_output_shape = output_C >= input_C
    reorg_stride = stride_MACRO
    reorg_batch = batch_MACRO
    reorg_C = output_C if use_output_shape else input_C
    reorg_H = output_H if use_output_shape else input_H
    reorg_W = output_W if use_output_shape else input_W
    # A 0..N-1 ramp makes every element's destination easy to trace.
    # np.arange(..., dtype=int) replaces np.array(list(range(...))).astype(np.int):
    # the np.int alias was removed in NumPy 1.24.
    matrix1_linear = np.arange(input_batch * input_C * input_H * input_W, dtype=int)
    matrix1 = np.reshape(matrix1_linear, (input_batch, input_C, input_H, input_W))
    print(matrix1)
    # Reorg (backward: channels increase)
    matrix2_linear = reorg(matrix1_linear, batch=reorg_batch, C=reorg_C, H=reorg_H, W=reorg_W, stride=reorg_stride, forward=False).astype(int)
    matrix2 = np.reshape(matrix2_linear, (output_batch, output_C, output_H, output_W))
    print(matrix2)
    # Reorg back (forward: channels decrease); matrix3 should be identical to matrix1.
    matrix3_linear = reorg(matrix2_linear, batch=reorg_batch, C=reorg_C, H=reorg_H, W=reorg_W, stride=reorg_stride, forward=True).astype(int)
    matrix3 = np.reshape(matrix3_linear, (input_batch, input_C, input_H, input_W))
    print(matrix3)
    assert np.array_equal(matrix1, matrix3)
# First forward (channel decrease) then backward (channel increase)
# Reorganization is reversible
# Reorganization result was expected by the public
def forward_backward_leimao():
    """Forward reorg (channel decrease) then backward reorg (channel increase),
    passing reorg the LARGER of the two shapes (here the input shape, since
    output_C < input_C) — leimao's variant.

    The round trip is exact (matrix3 == matrix1) and the intermediate result
    matches what the public expected.
    """
    stride = stride_MACRO
    input_batch = batch_MACRO
    input_C = C_MACRO
    input_H = H_MACRO
    input_W = W_MACRO
    output_batch = input_batch
    output_C = input_C // stride // stride
    output_H = input_H * stride
    output_W = input_W * stride
    # Parameters for the reorg function: use whichever shape has more channels.
    use_output_shape = output_C >= input_C
    reorg_stride = stride_MACRO
    reorg_batch = batch_MACRO
    reorg_C = output_C if use_output_shape else input_C
    reorg_H = output_H if use_output_shape else input_H
    reorg_W = output_W if use_output_shape else input_W
    # A 0..N-1 ramp makes every element's destination easy to trace.
    # np.arange(..., dtype=int) replaces np.array(list(range(...))).astype(np.int):
    # the np.int alias was removed in NumPy 1.24.
    matrix1_linear = np.arange(input_batch * input_C * input_H * input_W, dtype=int)
    matrix1 = np.reshape(matrix1_linear, (input_batch, input_C, input_H, input_W))
    print(matrix1)
    # Reorg (forward: channels decrease)
    matrix2_linear = reorg(matrix1_linear, batch=reorg_batch, C=reorg_C, H=reorg_H, W=reorg_W, stride=reorg_stride, forward=True).astype(int)
    matrix2 = np.reshape(matrix2_linear, (output_batch, output_C, output_H, output_W))
    print(matrix2)
    # Reorg back (backward: channels increase); matrix3 should be identical to matrix1.
    matrix3_linear = reorg(matrix2_linear, batch=reorg_batch, C=reorg_C, H=reorg_H, W=reorg_W, stride=reorg_stride, forward=False).astype(int)
    matrix3 = np.reshape(matrix3_linear, (input_batch, input_C, input_H, input_W))
    print(matrix3)
    assert np.array_equal(matrix1, matrix3)
if __name__ == '__main__':
    # Run the four round-trip demos, printing the original separator between them.
    demos = (
        backward_forward_author,
        forward_backward_author,
        backward_forward_leimao,
        forward_backward_leimao,
    )
    separators = (
        "**********************************",
        "==================================",
        "**********************************",
    )
    for step, demo in enumerate(demos):
        demo()
        if step < len(separators):
            print(separators[step])
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment