@Kautenja
Last active November 16, 2022 09:23
A Python method for calculating accuracy, true positives/negatives, and false positives/negatives from prediction and ground truth arrays.
"""A method to calculate the number of True/False Positive/Negative guesses."""
import numpy as np
def tpfp(predictions: np.ndarray,
         ground_truth: np.ndarray,
         negative: float = 0.0,
         positive: float = 1.0,
         normalize: bool = True) -> dict:
    """
    Return a dictionary of accuracy and true/false negative/positive guesses.

    Args:
        predictions: an array of predicted labels
        ground_truth: an array of ground truth labels
        negative: a sentinel value indicating a negative label
        positive: a sentinel value indicating a positive label
        normalize: whether to normalize the tp/fp/tn/fn counts by the
            number of positive/negative predictions

    Returns: a dictionary of metrics:
        'acc': the binary classification accuracy
        'tp': the number of true positives
        'tn': the number of true negatives
        'fp': the number of false positives
        'fn': the number of false negatives

    """
    # compute the raw accuracy
    acc = np.mean(predictions == ground_truth)
    # accumulate the true/false negative/positives
    tp = np.sum(np.logical_and(predictions == positive, ground_truth == positive))
    tn = np.sum(np.logical_and(predictions == negative, ground_truth == negative))
    fp = np.sum(np.logical_and(predictions == positive, ground_truth == negative))
    fn = np.sum(np.logical_and(predictions == negative, ground_truth == positive))
    # normalize the true/false negative/positives to fractions if enabled
    if normalize:
        # calculate the total number of positive guesses
        total_positive = np.sum(predictions == positive)
        if total_positive == 0:
            # avoid dividing by zero
            tp = 0
            fp = 0
        else:
            # normalize by the total number of positive guesses
            tp = tp / total_positive
            fp = fp / total_positive
        # calculate the total number of negative guesses
        total_negative = np.sum(predictions == negative)
        if total_negative == 0:
            # avoid dividing by zero
            tn = 0
            fn = 0
        else:
            # normalize by the total number of negative guesses
            tn = tn / total_negative
            fn = fn / total_negative
    # return a dictionary of the accuracy and true/false positive/negative values
    return {
        'acc': acc,
        'tp': tp,
        'tn': tn,
        'fp': fp,
        'fn': fn,
    }
# explicitly define the outward facing API for this module
__all__ = ['tpfp']
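
A quick way to see how the function behaves is to call it on small hand-built arrays. The values below are illustrative only (not from the gist); note that with normalize=True the 'tp'/'fp' entries are fractions of the positive predictions and 'tn'/'fn' are fractions of the negative predictions.

import numpy as np

# illustrative toy labels: 4 of 6 predictions match the ground truth
predictions = np.array([1, 0, 1, 1, 0, 0])
ground_truth = np.array([1, 0, 0, 1, 0, 1])

metrics = tpfp(predictions, ground_truth)
# raw counts: tp=2, tn=2, fp=1, fn=1; with 3 positive and 3 negative guesses
# the normalized result is approximately:
# {'acc': 0.667, 'tp': 0.667, 'tn': 0.667, 'fp': 0.333, 'fn': 0.333}
print(metrics)

# pass normalize=False to get the raw counts instead of fractions
print(tpfp(predictions, ground_truth, normalize=False))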