Piotr Skalski (SkalskiP)

@SkalskiP
SkalskiP / split-video-into-images.sh
Created June 24, 2022 10:09
Split `.mp4` video into separate `.jpg` frames
ffmpeg -i source-video.mp4 -r 1 -an -f image2 "frames/frame-%05d.jpg"
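For context, a minimal Python wrapper around the same command could look like the sketch below; the split_video helper and its defaults are assumptions (not part of the gist), and ffmpeg is assumed to be on PATH.

import subprocess
from pathlib import Path

def split_video(src: str = "source-video.mp4", out_dir: str = "frames", fps: int = 1) -> None:
    # hypothetical helper; -r sets the sampling rate in frames per second, -an drops the audio stream
    Path(out_dir).mkdir(exist_ok=True)
    subprocess.run(
        ["ffmpeg", "-i", src, "-r", str(fps), "-an", "-f", "image2",
         f"{out_dir}/frame-%05d.jpg"],
        check=True,
    )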
# ReLU activation: forward and backward pass
def forward_pass(self, a_prev: np.array) -> np.array:
    # keep positive activations, zero out the rest
    self._z = np.maximum(0, a_prev)
    return self._z

def backward_pass(self, da_curr: np.array) -> np.array:
    # gradient flows only through units that were active in the forward pass
    dz = np.array(da_curr, copy=True)
    dz[self._z <= 0] = 0
    return dz
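The two methods above are the whole ReLU layer. A quick usage sketch, where the ReluLayer wrapper class is an assumption added only to make the snippet runnable:

import numpy as np

class ReluLayer:
    # hypothetical wrapper holding the two gist methods above
    def forward_pass(self, a_prev: np.ndarray) -> np.ndarray:
        self._z = np.maximum(0, a_prev)
        return self._z

    def backward_pass(self, da_curr: np.ndarray) -> np.ndarray:
        dz = np.array(da_curr, copy=True)
        dz[self._z <= 0] = 0
        return dz

relu = ReluLayer()
a = np.array([[-1.0, 2.0], [3.0, -4.0]])
print(relu.forward_pass(a))                  # [[0. 2.] [3. 0.]]
print(relu.backward_pass(np.ones_like(a)))   # gradient passes only where the input was positive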
@SkalskiP
SkalskiP / flatten.py
Last active May 23, 2020 17:01
Flatten layer
def forward_pass(self, a_prev: np.array) -> np.array:
    self._shape = a_prev.shape
    return np.ravel(a_prev).reshape(a_prev.shape[0], -1)

def backward_pass(self, da_curr: np.array) -> np.array:
    return da_curr.reshape(self._shape)
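A small shape check for the flatten layer, assuming it sits between a convolutional block (NHWC tensors) and a dense block; the FlattenLayer wrapper name is hypothetical:

import numpy as np

class FlattenLayer:
    # hypothetical wrapper around the two gist methods above
    def forward_pass(self, a_prev: np.ndarray) -> np.ndarray:
        self._shape = a_prev.shape
        return np.ravel(a_prev).reshape(a_prev.shape[0], -1)

    def backward_pass(self, da_curr: np.ndarray) -> np.ndarray:
        return da_curr.reshape(self._shape)

flatten = FlattenLayer()
a = np.random.rand(8, 4, 4, 16)            # (batch, height, width, channels)
out = flatten.forward_pass(a)
print(out.shape)                            # (8, 256)
print(flatten.backward_pass(out).shape)     # (8, 4, 4, 16), original shape restored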
@SkalskiP
SkalskiP / dropout.py
Created May 23, 2020 16:44
Dropout forward and backward
def forward_pass(self, a_prev: np.array, training: bool) -> np.array:
    if training:
        # keep each activation with probability keep_prob
        self._mask = (np.random.rand(*a_prev.shape) < self._keep_prob)
        return self._apply_mask(a_prev, self._mask)
    else:
        # dropout is disabled at inference time
        return a_prev

def backward_pass(self, da_curr: np.array) -> np.array:
    # propagate gradients only through the units kept in the forward pass
    return self._apply_mask(da_curr, self._mask)
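The gist relies on a private _apply_mask helper that is not shown. A plausible sketch of its behaviour, assuming inverted dropout (rescaling by keep_prob so the expected activation stays unchanged); the standalone apply_mask name is an assumption:

import numpy as np

def apply_mask(array: np.ndarray, mask: np.ndarray, keep_prob: float) -> np.ndarray:
    # assumed behaviour: zero out dropped units and rescale the survivors
    return (array * mask) / keep_prob

mask = np.random.rand(2, 3) < 0.8
print(apply_mask(np.ones((2, 3)), mask, keep_prob=0.8))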
@SkalskiP
SkalskiP / pooling.py
Last active May 30, 2020 17:19
Max pooling
def forward_pass(self, a_prev: np.array) -> np.array:
    self._shape = a_prev.shape
    n, h_in, w_in, c = a_prev.shape
    h_pool, w_pool = self._pool_size
    h_out = 1 + (h_in - h_pool) // self._stride
    w_out = 1 + (w_in - w_pool) // self._stride
    output = np.zeros((n, h_out, w_out, c))
    for i in range(h_out):
        for j in range(w_out):
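The gist preview cuts off inside the double loop. A self-contained sketch of what a loop-based max-pooling forward pass looks like under the same NHWC layout; the function and variable names below are assumptions, not the gist's:

import numpy as np

def max_pool_forward(a_prev: np.ndarray, pool_size=(2, 2), stride=2) -> np.ndarray:
    # naive max pooling: slide a window over each spatial position and take the channel-wise max
    n, h_in, w_in, c = a_prev.shape
    h_pool, w_pool = pool_size
    h_out = 1 + (h_in - h_pool) // stride
    w_out = 1 + (w_in - w_pool) // stride
    output = np.zeros((n, h_out, w_out, c))
    for i in range(h_out):
        for j in range(w_out):
            h_start, w_start = i * stride, j * stride
            window = a_prev[:, h_start:h_start + h_pool, w_start:w_start + w_pool, :]
            output[:, i, j, :] = np.max(window, axis=(1, 2))
    return output

print(max_pool_forward(np.random.rand(1, 4, 4, 3)).shape)  # (1, 2, 2, 3)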
@SkalskiP
SkalskiP / conv_frorward.py
Last active May 23, 2020 12:03
Convolution forward
def forward_pass(self, a_prev: np.array) -> np.array:
    # compute the output shape first, then unpack the dimensions needed by the loops
    output_shape = self.calculate_output_dims(input_dims=a_prev.shape)
    n, h_in, w_in, _ = a_prev.shape
    _, h_out, w_out, _ = output_shape
    h_f, w_f, _, n_f = self._w.shape
    output = np.zeros(output_shape)
    for i in range(h_out):
        for j in range(w_out):
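As with pooling, the preview stops inside the loops. A self-contained sketch of a naive, loop-based convolution forward pass (no padding, NHWC activations, HWCF weights); every name below is an assumption added for illustration:

import numpy as np

def conv_forward(a_prev: np.ndarray, w: np.ndarray, b: np.ndarray, stride: int = 1) -> np.ndarray:
    # a_prev: (n, h_in, w_in, c_in), w: (h_f, w_f, c_in, n_f), b: (n_f,)
    n, h_in, w_in, _ = a_prev.shape
    h_f, w_f, _, n_f = w.shape
    h_out = (h_in - h_f) // stride + 1
    w_out = (w_in - w_f) // stride + 1
    output = np.zeros((n, h_out, w_out, n_f))
    for i in range(h_out):
        for j in range(w_out):
            h_start, w_start = i * stride, j * stride
            window = a_prev[:, h_start:h_start + h_f, w_start:w_start + w_f, :]
            # contract the window with every filter over height, width and input channels
            output[:, i, j, :] = np.tensordot(window, w, axes=([1, 2, 3], [0, 1, 2]))
    return output + b

print(conv_forward(np.random.rand(2, 8, 8, 3), np.random.rand(3, 3, 3, 16), np.zeros(16)).shape)  # (2, 6, 6, 16)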
@SkalskiP
SkalskiP / model.py
Last active May 30, 2020 17:16
Sequential Model
def train(
    self, x_train: np.array, y_train: np.array,
    epochs: int, batch_size: int = 64,
) -> None:
    for epoch in range(epochs):
        for x_batch, y_batch in generate_batches(x_train, y_train, batch_size):
            y_hat_batch = self._forward(x_batch)
            # assuming softmax outputs with cross-entropy loss and one-hot labels,
            # the gradient with respect to the network output is simply y_hat - y
            activation = y_hat_batch - y_batch
            self._backward(activation)
            self._update()
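The training loop uses a generate_batches helper that is not shown in the preview. A sketch of its assumed behaviour, yielding consecutive mini-batches with the last one possibly smaller than batch_size:

import numpy as np
from typing import Iterator, Tuple

def generate_batches(x: np.ndarray, y: np.ndarray, batch_size: int) -> Iterator[Tuple[np.ndarray, np.ndarray]]:
    # assumed behaviour of the helper referenced in train()
    for i in range(0, x.shape[0], batch_size):
        yield x[i:i + batch_size], y[i:i + batch_size]

x = np.random.rand(10, 4)
y = np.eye(2)[np.random.randint(0, 2, size=10)]   # one-hot labels
print([xb.shape for xb, _ in generate_batches(x, y, batch_size=4)])  # [(4, 4), (4, 4), (2, 4)]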
class MaxPoolLayer(Layer):
    def __init__(self, pool_size: Tuple[int, int], stride: int = 2):
        self._pool_size = pool_size
        self._stride = stride
        self._a = None
        self._cache = {}

    def forward_pass(self, a_prev: np.array, training: bool) -> np.array:
        self._a = np.array(a_prev, copy=True)
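This preview also stops at the start of forward_pass. Presumably _cache stores, for every output position, a binary mask marking where each window's maximum was, so the backward pass can route gradients only to those positions. A standalone sketch of that routing, which is an assumption about the implementation rather than the gist itself:

import numpy as np

def max_pool_backward(da_curr: np.ndarray, cache: dict, input_shape: tuple,
                      pool_size=(2, 2), stride=2) -> np.ndarray:
    # cache[(i, j)] is assumed to be a 0/1 mask of the argmax positions inside window (i, j)
    output = np.zeros(input_shape)
    _, h_out, w_out, _ = da_curr.shape
    h_pool, w_pool = pool_size
    for i in range(h_out):
        for j in range(w_out):
            h_start, w_start = i * stride, j * stride
            output[:, h_start:h_start + h_pool, w_start:w_start + w_pool, :] += \
                da_curr[:, i:i + 1, j:j + 1, :] * cache[(i, j)]
    return output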
# Dense (fully connected) layer: forward and backward pass
def forward_pass(self, a_prev: np.array) -> np.array:
    self._a_prev = np.array(a_prev, copy=True)
    return np.dot(a_prev, self._w.T) + self._b

def backward_pass(self, da_curr: np.array) -> np.array:
    n = self._a_prev.shape[0]
    self._dw = np.dot(da_curr.T, self._a_prev) / n
    self._db = np.sum(da_curr, axis=0, keepdims=True) / n
    return np.dot(da_curr, self._w)
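A short shape sanity check for the dense layer above. The DenseLayer wrapper class and its random initialization are assumptions; only the arithmetic inside the two methods comes from the gist:

import numpy as np

class DenseLayer:
    # hypothetical wrapper: weights are (units_out, units_in), bias is (1, units_out)
    def __init__(self, units_in: int, units_out: int):
        self._w = np.random.randn(units_out, units_in) * 0.1
        self._b = np.zeros((1, units_out))

    def forward_pass(self, a_prev: np.ndarray) -> np.ndarray:
        self._a_prev = np.array(a_prev, copy=True)
        return np.dot(a_prev, self._w.T) + self._b

    def backward_pass(self, da_curr: np.ndarray) -> np.ndarray:
        n = self._a_prev.shape[0]
        self._dw = np.dot(da_curr.T, self._a_prev) / n   # (units_out, units_in)
        self._db = np.sum(da_curr, axis=0, keepdims=True) / n
        return np.dot(da_curr, self._w)                  # gradient w.r.t. a_prev

layer = DenseLayer(units_in=64, units_out=10)
out = layer.forward_pass(np.random.rand(32, 64))
print(out.shape)                                      # (32, 10)
print(layer.backward_pass(np.ones_like(out)).shape)   # (32, 64)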