import numpy as np

class FlattenLayer(Layer):
    def __init__(self):
        self._shape = ()

    def forward_pass(self, a_prev: np.ndarray, training: bool) -> np.ndarray:
        # memorize the input shape so the backward pass can undo the flatten
        self._shape = a_prev.shape
        return np.ravel(a_prev).reshape(a_prev.shape[0], -1)

    def backward_pass(self, da_curr: np.ndarray) -> np.ndarray:
        # restore the gradient to the original input shape
        return da_curr.reshape(self._shape)
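A quick shape check for the layer above; the base Layer class is assumed from the surrounding gist, and the batch dimensions here are arbitrary:

# Arbitrary example: a batch of 8 activation maps of shape 4x4x16.
layer = FlattenLayer()
a_prev = np.random.rand(8, 4, 4, 16)

flat = layer.forward_pass(a_prev, training=True)
print(flat.shape)     # (8, 256) - one row per example

da_prev = layer.backward_pass(np.ones_like(flat))
print(da_prev.shape)  # (8, 4, 4, 16) - original shape restored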
import math
from typing import Dict, Optional

import numpy as np

def plot_shap_top_explanations(
    model: Model,
    image: np.ndarray,
    class_names_mapping: Dict[int, str],
    top_preds_count: int = 3,
    fig_name: Optional[str] = None
) -> None:
    image_columns = 3
    image_rows = math.ceil(top_preds_count / image_columns)

def plot_lime_top_explanations(
    model: Model,
    image: np.ndarray,
    class_names_mapping: Dict[int, str],
    top_preds_count: int = 3,
    fig_name: Optional[str] = None
) -> None:
    image_columns = 3
    image_rows = math.ceil(top_preds_count / image_columns)

def plot_eli5_top_explanations(
    model: Model,
    image: np.ndarray,
    class_names_mapping: Dict[int, str],
    top_preds_count: int = 3,
    fig_name: Optional[str] = None
) -> None:
    image_columns = 3
    image_rows = math.ceil(top_preds_count / image_columns)
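All three helpers share the same signature and the same 3-column grid layout. A hypothetical call might look as follows; the trained model, the input image, and the class-name mapping are assumptions, not part of the snippets above:

# Hypothetical usage; `model` is a trained classifier and `image` a single
# input array in the shape the model expects.
class_names_mapping = {0: "cat", 1: "dog", 2: "bird"}
plot_shap_top_explanations(
    model=model,
    image=image,
    class_names_mapping=class_names_mapping,
    top_preds_count=3,            # fits on one 3-column row
    fig_name="shap_top3.png"      # presumably used to save the figure
)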
python3 detect.py \
    --source <test_sample_path> \
    --cfg <configuration_file_path> \
    --weights weights/best.pt
dataset
├── class_names.txt
├── images
│   ├── image_1.png
│   ├── image_2.png
│   ├── image_3.png
│   └── ...
└── labels
    ├── image_1.txt
    ├── image_2.txt
    └── ...
4 0.360558 0.439186 0.068327 0.250741
7 0.697519 0.701205 0.078643 0.228243
3 0.198589 0.683692 0.076613 0.263441
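Each line in a label file follows the YOLO convention: a class id followed by the normalized box center x, center y, width, and height (all relative to the image size, in the 0-1 range). A small sketch converting one file back to pixel coordinates; the file path and the 640x480 image size are assumptions:

# Sketch: convert normalized YOLO labels to pixel boxes.
img_w, img_h = 640, 480
with open("dataset/labels/image_1.txt") as f:
    for line in f:
        class_id, x_c, y_c, w, h = line.split()
        w_px, h_px = float(w) * img_w, float(h) * img_h
        x_min = float(x_c) * img_w - w_px / 2   # box center -> top-left corner
        y_min = float(y_c) * img_h - h_px / 2
        print(int(class_id), x_min, y_min, w_px, h_px)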
@SkalskiP
SkalskiP / setup.sh
Last active January 25, 2020 18:56
# Clone framework
git clone https://github.com/ultralytics/yolov3.git
# Enter framework directory [Linux/MacOS]
cd ./yolov3
# Enter framework directory [Windows]
cd yolov3
# Set up Python environment
pip install -U -r requirements.txt
@SkalskiP
SkalskiP / train_batch.py
Created November 1, 2018 10:38
Mini-batch gradient descent
def train_batch(X, Y, nn_architecture, epochs, learning_rate, batch_size=64, verbose=False, callback=None):
    params_values = init_layers(nn_architecture, 2)
    cost_history = []
    accuracy_history = []

    # Beginning of additional code snippet
    batch_number = X.shape[1] // batch_size
    # Ending of additional code snippet

    for i in range(epochs):
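The preview cuts off at the epoch loop. Under the column-major layout the code implies (examples along axis 1 of X), the batch counter could drive the slicing as in this standalone sketch; the dummy data and the per-batch step are assumptions:

import numpy as np

# 4 features, 10 examples stored column-wise, batch size of 4.
X = np.random.rand(4, 10)
Y = np.random.rand(1, 10)
batch_size = 4
batch_number = X.shape[1] // batch_size   # -> 2 full batches

for batch_idx in range(batch_number):
    start = batch_idx * batch_size
    X_batch = X[:, start:start + batch_size]
    Y_batch = Y[:, start:start + batch_size]
    # per-batch forward pass, cost, backward pass, and update would go here
    print(batch_idx, X_batch.shape, Y_batch.shape)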
@SkalskiP
SkalskiP / train.py
Created October 11, 2018 19:56
Putting things together
def train(X, Y, nn_architecture, epochs, learning_rate):
    params_values = init_layers(nn_architecture, 2)
    cost_history = []
    accuracy_history = []

    for i in range(epochs):
        Y_hat, cache = full_forward_propagation(X, params_values, nn_architecture)
        cost = get_cost_value(Y_hat, Y)
        cost_history.append(cost)
        accuracy = get_accuracy_value(Y_hat, Y)
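The preview ends mid-iteration. The loop could plausibly finish as sketched below; the backward-propagation and update helpers are assumptions, named by analogy with full_forward_propagation above:

        accuracy_history.append(accuracy)
        # assumed helpers: backpropagate the error, then update the parameters
        grads_values = full_backward_propagation(Y_hat, Y, cache, params_values, nn_architecture)
        params_values = update_params(params_values, grads_values, nn_architecture, learning_rate)
    return params_values, cost_history, accuracy_history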