Skip to content

Instantly share code, notes, and snippets.

@karolzak
Created June 27, 2019 13:48
Show Gist options
  • Save karolzak/754f15baf902c9df6cab173573d02dfa to your computer and use it in GitHub Desktop.
semantic segmentation losses
# https://lars76.github.io/neural-networks/object-detection/losses-for-segmentation/
def weighted_cross_entropy(beta):
    """Factory for a weighted sigmoid cross-entropy loss.

    The positive-class term is scaled by ``beta`` (pos_weight), which lets
    you trade recall against precision on imbalanced masks. The returned
    closure has the Keras ``loss(y_true, y_pred)`` signature and expects
    ``y_pred`` to be probabilities (post-sigmoid).
    """

    def _to_logits(probs):
        # Invert the sigmoid. Clip first so the division and log stay finite;
        # see https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/keras/backend.py#L3525
        eps = tf.keras.backend.epsilon()
        probs = tf.clip_by_value(probs, eps, 1 - eps)
        return tf.log(probs / (1 - probs))

    def loss(y_true, y_pred):
        logits = _to_logits(y_pred)
        per_pixel = tf.nn.weighted_cross_entropy_with_logits(
            logits=logits, targets=y_true, pos_weight=beta)
        return tf.reduce_mean(per_pixel)

    return loss
#############
def balanced_cross_entropy(beta):
    """Factory for a balanced sigmoid cross-entropy loss.

    ``beta`` is the weight given to positive pixels and ``1 - beta`` to
    negative pixels; internally this is expressed via
    ``pos_weight = beta / (1 - beta)`` and a final ``(1 - beta)`` rescale.
    The returned closure follows the Keras ``loss(y_true, y_pred)``
    convention and expects probabilities in ``y_pred``.
    """

    def _to_logits(probs):
        # Invert the sigmoid; clipping keeps the log and division finite.
        # see https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/keras/backend.py#L3525
        eps = tf.keras.backend.epsilon()
        probs = tf.clip_by_value(probs, eps, 1 - eps)
        return tf.log(probs / (1 - probs))

    def loss(y_true, y_pred):
        logits = _to_logits(y_pred)
        pos_weight = beta / (1 - beta)
        per_pixel = tf.nn.weighted_cross_entropy_with_logits(
            logits=logits, targets=y_true, pos_weight=pos_weight)
        # Multiply back by (1 - beta) so the two class weights sum as intended.
        return tf.reduce_mean(per_pixel * (1 - beta))

    return loss
#########
def focal_loss(alpha=0.25, gamma=2):
    """Factory for the binary focal loss (Lin et al., RetinaNet).

    ``alpha`` balances positives vs. negatives; ``gamma`` down-weights
    easy examples. The returned closure follows the Keras
    ``loss(y_true, y_pred)`` convention with ``y_pred`` as probabilities.
    """

    def _focal_with_logits(logits, targets, alpha, gamma, y_pred):
        # Focal modulation: (1 - p)^gamma on positives, p^gamma on negatives.
        pos_weight = alpha * (1 - y_pred) ** gamma * targets
        neg_weight = (1 - alpha) * y_pred ** gamma * (1 - targets)
        # Numerically stable sigmoid cross-entropy expressed via logits:
        # log(1 + exp(-|x|)) + relu(-x) == -log(sigmoid(x)) for any sign of x.
        stable_ce = tf.log1p(tf.exp(-tf.abs(logits))) + tf.nn.relu(-logits)
        return stable_ce * (pos_weight + neg_weight) + logits * neg_weight

    def loss(y_true, y_pred):
        eps = tf.keras.backend.epsilon()
        y_pred = tf.clip_by_value(y_pred, eps, 1 - eps)
        # Recover logits from probabilities (inverse sigmoid).
        logits = tf.log(y_pred / (1 - y_pred))
        per_pixel = _focal_with_logits(
            logits=logits, targets=y_true, alpha=alpha, gamma=gamma, y_pred=y_pred)
        return tf.reduce_mean(per_pixel)

    return loss
##########
from scipy.spatial import distance_matrix
import numpy as np
...
# Distance-to-foreground map: for every background pixel (img == 0), store
# the Euclidean distance to the nearest non-zero pixel. Foreground pixels
# keep the value 0. `img`, HEIGHT and WIDTH come from surrounding code.
not_zeros = np.argwhere(img != 0)
zeros = np.argwhere(img == 0)
# Pairwise distances between every background and every foreground pixel;
# the row-wise minimum is the nearest-foreground distance per background pixel.
dist_matrix = distance_matrix(zeros, not_zeros, p=2)
dist = np.min(dist_matrix, axis=1)
# NOTE(review): uint8 truncates/wraps distances above 255 — presumably
# acceptable for the intended image sizes; confirm.
output = np.zeros((HEIGHT, WIDTH, 1), dtype=np.uint8)
# np.argwhere yields coordinates in row-major (y, x) order — the same order
# the original per-pixel double loop visited — so one vectorized fancy-index
# assignment replaces the O(H*W) Python loop with identical results.
output[zeros[:, 0], zeros[:, 1], 0] = dist
...
from functools import partial
# Pattern for a pixel-weighted loss: a per-pixel weight map is fed to the
# model as an extra Input tensor and pre-bound into the loss with partial.
def loss_function(y_true, y_pred, weights):
# (body elided in the original gist)
...
# Extra Keras input carrying the weight map (e.g. a precomputed distance
# map). `Input`, HEIGHT and WIDTH come from the surrounding model code.
weight_input = Input(shape=(HEIGHT, WIDTH))
# Keras expects loss(y_true, y_pred); bind the weights tensor up front.
loss = partial(loss_function, weights=weight_input)
############
def dice_loss(y_true, y_pred):
    """Soft Dice loss: ``1 - Dice coefficient``.

    Bug fix: the original returned the Dice *coefficient* itself — a
    similarity score where higher is better — so minimizing it would drive
    the overlap toward zero. A loss must decrease as overlap improves,
    hence the ``1 -``.
    """
    numerator = 2 * tf.reduce_sum(y_true * y_pred)
    # some implementations don't square y_pred
    denominator = tf.reduce_sum(y_true + tf.square(y_pred))
    # Epsilon guards against division by zero on empty masks.
    return 1 - numerator / (denominator + tf.keras.backend.epsilon())
############
def tversky_loss(beta):
    """Factory for the Tversky loss: ``1 - Tversky index``.

    ``beta`` weights false positives; ``1 - beta`` weights false negatives
    (beta = 0.5 reduces to soft Dice). Bug fix: the original returned the
    Tversky *index* (similarity, higher is better); as a loss it must be
    ``1 - index`` so minimization increases overlap.
    """

    def loss(y_true, y_pred):
        numerator = tf.reduce_sum(y_true * y_pred)
        # TP + beta*FP + (1-beta)*FN, in soft (probabilistic) form.
        denominator = y_true * y_pred + beta * (1 - y_true) * y_pred + (1 - beta) * y_true * (1 - y_pred)
        # Epsilon guards against division by zero on empty masks.
        return 1 - numerator / (tf.reduce_sum(denominator) + tf.keras.backend.epsilon())

    return loss
##########
# Thin Keras adapter around `lovasz_hinge`, which is defined elsewhere
# (e.g. the official Lovasz-Softmax reference implementation).
# NOTE(review): y_pred is forwarded as `logits`, so the model presumably
# outputs raw logits rather than probabilities — confirm against the model.
def lovasz_softmax(y_true, y_pred):
return lovasz_hinge(labels=y_true, logits=y_pred)
# `model`, `optimizer` and `pixel_iou` are defined elsewhere in the project.
model.compile(loss=lovasz_softmax, optimizer=optimizer, metrics=[pixel_iou])
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment