This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import keras | |
# import keras_retinanet | |
from object_detector_retinanet.keras_retinanet import models | |
from object_detector_retinanet.keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image | |
from object_detector_retinanet.keras_retinanet.utils.visualization import draw_box, draw_caption | |
from object_detector_retinanet.keras_retinanet.utils.colors import label_color | |
# import for EM Merger and viz | |
from object_detector_retinanet.keras_retinanet.utils import EmMerger |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
costs = [] # initially empty list, this will store all the costs after a certain number of epochs | |
# Start training | |
for epoch in range(number_of_epochs): | |
# ------------------------- forward-prop ------------------------- | |
Z1.forward(X_train) | |
A1.forward(Z1.Z) | |
# ---------------------- Compute Cost ---------------------------- |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# define training constants
learning_rate = 1
number_of_epochs = 5000

np.random.seed(48)  # set seed value so that the results are reproducible
# (weights will now be initialized to the same pseudo-random numbers, each time)

# Our network architecture has the shape:
#                   (input)--> [Linear->Sigmoid] -->(output)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
def compute_keras_like_bce_cost(Y, P_hat, from_logits=False):
    """
    This function computes the Binary Cross-Entropy(stable_bce) Cost function the way Keras
    implements it. Accepting either probabilities(P_hat) from the sigmoid neuron or values direct
    from the linear node(Z)

    Args:
        Y: labels of data
        P_hat: Probabilities from sigmoid function
        from_logits: flag to check if logits are being provided or not(Default: False)

    Returns:
        cost: The "Stable" Binary Cross-Entropy Cost result
        dZ_last: gradient of Cost w.r.t the last linear node
    """
    if from_logits:
        # P_hat already holds raw linear-node values (logits); use them directly
        return compute_stable_bce_cost(Y, Z=P_hat)
    else:
        # Convert probabilities back to logits before using the stable formulation.
        # Clip away exact 0/1 so the log/division below cannot blow up.
        # NOTE(review): epsilon mirrors the usual Keras backend epsilon (1e-7) — confirm.
        eps = 1e-7
        P_hat = np.clip(P_hat, a_min=eps, a_max=1 - eps)
        Z = np.log(P_hat / (1 - P_hat))  # inverse of the sigmoid
        return compute_stable_bce_cost(Y, Z)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
def compute_stable_bce_cost(Y, Z):
    """
    This function computes the "Stable" Binary Cross-Entropy(stable_bce) Cost and returns the Cost and its
    derivative w.r.t Z_last(the last linear node) .
    The Stable Binary Cross-Entropy Cost is defined as:
    => (1/m) * np.sum(max(Z,0) - ZY + log(1+exp(-|Z|)))

    Args:
        Y: labels of data
        Z: Values from the last linear node

    Returns:
        cost: The "Stable" Binary Cross-Entropy Cost result
        dZ_last: gradient of Cost w.r.t Z_last
    """
    m = Y.shape[1]  # labels laid out as a row vector -> number of examples

    # the numerically stable per-element BCE, averaged over the m examples
    cost = (1 / m) * np.sum(np.maximum(Z, 0) - Z * Y + np.log(1 + np.exp(-np.abs(Z))))

    # d(cost)/dZ = sigmoid(Z) - Y, averaged over the m examples
    dZ_last = (1 / m) * ((1 / (1 + np.exp(-Z))) - Y)

    return cost, dZ_last
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
def compute_bce_cost(Y, P_hat):
    """
    This function computes Binary Cross-Entropy(bce) Cost and returns the Cost and its
    derivative.
    This function uses the following Binary Cross-Entropy Cost defined as:
    => (1/m) * np.sum(-Y*np.log(P_hat) - (1-Y)*np.log(1-P_hat))

    Args:
        Y: labels of data
        P_hat: Estimated output probabilities from the last layer, the output layer

    Returns:
        cost: The Binary Cross-Entropy Cost result
        dP_hat: gradient of Cost w.r.t P_hat
    """
    m = Y.shape[1]  # labels laid out as a row vector -> number of examples

    cost = (1 / m) * np.sum(-Y * np.log(P_hat) - (1 - Y) * np.log(1 - P_hat))
    cost = np.squeeze(cost)  # collapse the 0-d array to a scalar

    # derivative of the summand w.r.t P_hat, averaged over the m examples
    dP_hat = (1 / m) * (-(Y / P_hat) + ((1 - Y) / (1 - P_hat)))

    return cost, dP_hat
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
costs = [] # initially empty list, this will store all the costs after a certian number of epochs | |
# Start training | |
for epoch in range(number_of_epochs): | |
# ------------------------- forward-prop ------------------------- | |
Z1.forward(X_train) | |
A1.forward(Z1.Z) | |
Z2.forward(A1.A) |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# define training constants
learning_rate = 1
number_of_epochs = 5000

np.random.seed(48)  # set seed value so that the results are reproducible
# (weights will now be initialized to the same pseudo-random numbers, each time)

# Our network architecture has the shape:
#           (input)--> [Linear->Sigmoid] -> [Linear->Sigmoid] -->(output)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
def compute_cost(Y, Y_hat):
    """
    This function computes and returns the Cost and its derivative.
    This function uses the Squared Error Cost function -> (1/2m)*sum(Y - Y_hat)^2

    Args:
        Y: labels of data
        Y_hat: Predictions(activations) from a last layer, the output layer

    Returns:
        cost: the Squared Error Cost result
        dY_hat: gradient of Cost w.r.t Y_hat
    """
    m = Y.shape[1]  # labels laid out as a row vector -> number of examples

    cost = (1 / (2 * m)) * np.sum(np.square(Y - Y_hat))
    cost = np.squeeze(cost)  # collapse the 0-d array to a scalar

    dY_hat = -1 / m * (Y - Y_hat)  # derivative of the (1/2m)*sum(...)^2 term

    return cost, dY_hat
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import numpy as np | |
def initialize_parameters(n_in, n_out, ini_type='plain'):
    """
    Helper function to initialize some form of random weights and Zero biases
    Args:
        n_in: size of input layer
        n_out: size of output/number of neurons
        ini_type: set initialization type for weights ('plain', 'xavier' or 'he'; Default: 'plain')

    Returns:
        params: a dictionary with weights 'W' of shape (n_out, n_in) and
                zero biases 'b' of shape (n_out, 1)

    Raises:
        ValueError: if ini_type is not one of the supported schemes
    """
    params = dict()

    if ini_type == 'plain':
        # small random numbers so early sigmoid activations are not saturated
        params['W'] = np.random.randn(n_out, n_in) * 0.01
    elif ini_type == 'xavier':
        # Xavier/Glorot scaling: variance 1/n_in
        params['W'] = np.random.randn(n_out, n_in) / np.sqrt(n_in)
    elif ini_type == 'he':
        # He et al. scaling: variance 2/n_in
        params['W'] = np.random.randn(n_out, n_in) * np.sqrt(2 / n_in)
    else:
        raise ValueError("Unknown ini_type: %r" % (ini_type,))

    params['b'] = np.zeros((n_out, 1))  # biases always start at zero

    return params
NewerOlder