@arnaldog12
Last active January 9, 2023 17:34
Custom Metrics for Keras and TensorFlow
import numpy as np
import tensorflow as tf
from keras import backend as K


def recall(y_true, y_pred):
    # Recall = TP / (TP + FN): the fraction of actual positives that are caught.
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_positives / (possible_positives + K.epsilon())


def precision(y_true, y_pred):
    # Precision = TP / (TP + FP): the fraction of predicted positives that are real.
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return true_positives / (predicted_positives + K.epsilon())


def specificity(y_true, y_pred):
    # Specificity = TN / (TN + FP): the true negative rate.
    tn = K.sum(K.round(K.clip((1 - y_true) * (1 - y_pred), 0, 1)))
    fp = K.sum(K.round(K.clip((1 - y_true) * y_pred, 0, 1)))
    return tn / (tn + fp + K.epsilon())


def negative_predictive_value(y_true, y_pred):
    # NPV = TN / (TN + FN): the fraction of predicted negatives that are real.
    tn = K.sum(K.round(K.clip((1 - y_true) * (1 - y_pred), 0, 1)))
    fn = K.sum(K.round(K.clip(y_true * (1 - y_pred), 0, 1)))
    return tn / (tn + fn + K.epsilon())


def f1(y_true, y_pred):
    # F1 = harmonic mean of precision and recall.
    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    return 2 * ((p * r) / (p + r + K.epsilon()))


def fbeta(y_true, y_pred, beta=2):
    # F-beta weights recall beta times as heavily as precision (beta=2 favors recall).
    # Counts are taken per sample along axis=1 and averaged, so 2D inputs are expected.
    y_pred = K.clip(y_pred, 0, 1)
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)), axis=1)
    fp = K.sum(K.round(K.clip(y_pred - y_true, 0, 1)), axis=1)
    fn = K.sum(K.round(K.clip(y_true - y_pred, 0, 1)), axis=1)
    p = tp / (tp + fp + K.epsilon())
    r = tp / (tp + fn + K.epsilon())
    num = (1 + beta ** 2) * (p * r)
    den = beta ** 2 * p + r + K.epsilon()
    return K.mean(num / den)


def matthews_correlation_coefficient(y_true, y_pred):
    # MCC in [-1, 1]: correlation between binary predictions and labels.
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    tn = K.sum(K.round(K.clip((1 - y_true) * (1 - y_pred), 0, 1)))
    fp = K.sum(K.round(K.clip((1 - y_true) * y_pred, 0, 1)))
    fn = K.sum(K.round(K.clip(y_true * (1 - y_pred), 0, 1)))
    num = tp * tn - fp * fn
    den = (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)
    return num / K.sqrt(den + K.epsilon())


def equal_error_rate(y_true, y_pred):
    # Sweeps the decision threshold t upward in steps of 0.001 until the false
    # positive rate falls below the false negative rate, then returns their mean.
    n_imp = tf.math.count_nonzero(tf.equal(y_true, 0), dtype=tf.float32) + tf.constant(K.epsilon())
    n_gen = tf.math.count_nonzero(tf.equal(y_true, 1), dtype=tf.float32) + tf.constant(K.epsilon())
    scores_imp = tf.boolean_mask(y_pred, tf.equal(y_true, 0))  # negative-class scores
    scores_gen = tf.boolean_mask(y_pred, tf.equal(y_true, 1))  # positive-class scores
    loop_vars = (tf.constant(0.0), tf.constant(1.0), tf.constant(0.0))
    cond = lambda t, fpr, fnr: tf.greater_equal(fpr, fnr)
    body = lambda t, fpr, fnr: (
        t + 0.001,
        tf.divide(tf.math.count_nonzero(tf.greater_equal(scores_imp, t), dtype=tf.float32), n_imp),
        tf.divide(tf.math.count_nonzero(tf.less(scores_gen, t), dtype=tf.float32), n_gen),
    )
    t, fpr, fnr = tf.while_loop(cond, body, loop_vars, back_prop=False)
    return (fpr + fnr) / 2
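
To use these at training time, pass the functions to model.compile via the metrics argument. A minimal sketch follows; the toy model, layer sizes, and random data here are placeholders, not part of the gist:

import numpy as np
from keras import models, layers

model = models.Sequential([
    layers.Dense(16, activation='relu', input_shape=(10,)),
    layers.Dense(1, activation='sigmoid'),
])
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=[recall, precision, specificity, f1,
                       matthews_correlation_coefficient])

x = np.random.rand(128, 10)           # placeholder features
y = np.random.randint(0, 2, (128, 1)) # placeholder binary labels
model.fit(x, y, epochs=1, batch_size=32)

Because these are plain functions of (y_true, y_pred), a model saved with them must be reloaded with load_model(..., custom_objects={'recall': recall, ...}) so Keras can resolve the names.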
@DohaNaga

Hi Arnal,
I realized that the balanced accuracy values I calculate within TensorFlow are not the same as the ones calculated from the regular confusion matrix in the caret package (it's in R, but the code is very similar). Have you had any experience with that in Python? I tried both of these implementations of balanced accuracy:

TensorFlow

balanced_acc <- custom_metric("balanced_acc",function(y_true,y_pred){
  y_pred_pos = k_round(k_clip(y_pred, 0, 1))
  y_pred_neg = 1 - y_pred_pos
  
  y_pos = k_round(k_clip(y_true, 0, 1))
  y_neg = 1 - y_pos
  
  tp = k_sum(y_pos * y_pred_pos)
  tn = k_sum(y_neg * y_pred_neg)
  
  fp = k_sum(y_neg * y_pred_pos)
  fn = k_sum(y_pos * y_pred_neg)
  
  sensi = (tp/(tp + fn + k_epsilon()))
  specifi = (tn/(tn + fp + k_epsilon()))
  
  return((sensi + specifi )/ 2 )
  
})


## or

balanced_acc <- custom_metric("balanced_acc",function(y_true,y_pred){

  tp = k_sum(k_round(k_clip(y_true * y_pred, 0, 1)))
  tn =  k_sum(k_round(k_clip((1 - y_true) * (1 - y_pred), 0, 1)))
  
  fp = k_sum(k_round(k_clip((1 - y_true) * y_pred, 0, 1)))
  fn = k_sum(k_round(k_clip(y_true * (1 - y_pred), 0, 1)))
  
  sensi = (tp/(tp + fn + k_epsilon()))
  specifi = (tn/(tn + fp + k_epsilon()))
  
  return((sensi + specifi )/ 2 )
  
})

caret

  conf.matrix <-  caret::confusionMatrix(
    factor(pred_model, levels = 0:1),
    factor(y_test, levels = 0:1),
    positive = "1"
  )
  
  BalancedAccuracy <- conf.matrix$byClass[11]
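
One way to narrow the gap down (a sketch, not part of the original thread's code) is to reproduce both computations eagerly in Python on the same prediction vector, with sklearn's balanced_accuracy_score standing in for caret's confusion-matrix value and placeholder arrays in place of real data:

import numpy as np
from sklearn.metrics import balanced_accuracy_score

y_true = np.random.randint(0, 2, size=500)  # placeholder labels
y_score = np.random.rand(500)               # placeholder model scores
y_pred = (y_score >= 0.5).astype(int)       # same 0.5 cut-off as k_round

tp = np.sum(y_true * y_pred)
tn = np.sum((1 - y_true) * (1 - y_pred))
fp = np.sum((1 - y_true) * y_pred)
fn = np.sum(y_true * (1 - y_pred))
print((tp / (tp + fn) + tn / (tn + fp)) / 2)    # NumPy port of the custom metric
print(balanced_accuracy_score(y_true, y_pred))  # confusion-matrix reference value

If these two numbers agree, the remaining mismatch during training is most likely batch averaging: the value Keras shows is a running mean of per-batch balanced accuracies, whereas caret computes a single value from the full confusion matrix.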

@arnaldog12 (Author)

Hi, @DohaNaga
I would check the shape of each matrix and whether they are laid out the same way (i.e., whether rows are samples and columns are features).
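
To make that concrete, here is a small sketch (illustrative arrays only) of how a shape mismatch silently corrupts these metrics: if y_true is a flat vector while y_pred is a column vector, the elementwise products inside the metric broadcast to an (n, n) matrix and every count comes out wrong, with no error raised.

import numpy as np

y_true = np.array([1, 0, 1, 0])          # shape (4,)
y_pred = np.array([[1], [0], [1], [0]])  # shape (4, 1)

print((y_true * y_pred).shape)  # (4, 4): broadcast outer product, not elementwise
print(np.sum(y_true * y_pred))  # 4, although the true TP count is 2

y_true = y_true.reshape(-1, 1)  # match the layouts first
print(np.sum(y_true * y_pred))  # 2, the correct TP count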
