SimCLR_Loss: NT-Xent contrastive loss for SimCLR in PyTorch. Gist by @sadimanna, last active September 19, 2022.
import torch
import torch.nn as nn


class SimCLR_Loss(nn.Module):
    """NT-Xent (normalized temperature-scaled cross-entropy) loss used in SimCLR."""

    def __init__(self, batch_size, temperature):
        super().__init__()
        self.batch_size = batch_size
        self.temperature = temperature
        self.mask = self.mask_correlated_samples(batch_size)
        self.criterion = nn.CrossEntropyLoss(reduction="sum")
        self.similarity_f = nn.CosineSimilarity(dim=2)

    def mask_correlated_samples(self, batch_size):
        # Boolean mask that is True exactly at the negative pairs: it zeroes
        # out the main diagonal (self-similarity) and the two off-diagonals
        # that hold the positive pairs.
        N = 2 * batch_size
        mask = torch.ones((N, N), dtype=torch.bool)
        mask = mask.fill_diagonal_(0)
        for i in range(batch_size):
            mask[i, batch_size + i] = 0
            mask[batch_size + i, i] = 0
        return mask

    def forward(self, z_i, z_j):
        # z_i and z_j are the projections of the two augmented views,
        # each of shape (batch_size, projection_dim).
        N = 2 * self.batch_size
        z = torch.cat((z_i, z_j), dim=0)

        # Pairwise cosine similarity between all 2N embeddings,
        # scaled by the temperature; shape (N, N) with N = 2 * batch_size.
        sim = self.similarity_f(z.unsqueeze(1), z.unsqueeze(0)) / self.temperature

        # The positive pairs sit on the +/- batch_size off-diagonals.
        sim_i_j = torch.diag(sim, self.batch_size)
        sim_j_i = torch.diag(sim, -self.batch_size)

        positive_samples = torch.cat((sim_i_j, sim_j_i), dim=0).reshape(N, 1)
        negative_samples = sim[self.mask].reshape(N, -1)

        # Column 0 of the logits holds the positive pair, so every label is 0.
        labels = torch.zeros(N, dtype=torch.long, device=positive_samples.device)
        logits = torch.cat((positive_samples, negative_samples), dim=1)
        loss = self.criterion(logits, labels)
        loss /= N
        return loss
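
A minimal usage sketch. The batch size of 4 and projection dimension of 128 are arbitrary choices for illustration; z_i and z_j stand in for the projection-head outputs of the two augmented views of the same batch:

batch_size, proj_dim = 4, 128
criterion = SimCLR_Loss(batch_size=batch_size, temperature=0.5)

z_i = torch.randn(batch_size, proj_dim)  # projections of augmented view 1
z_j = torch.randn(batch_size, proj_dim)  # projections of augmented view 2

loss = criterion(z_i, z_j)
print(loss.item())  # scalar; for random inputs this lands roughly at log(2N - 1)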