Mixed Precision Training

NVIDIA GPU Tech Conference

NVIDIA SDK Documentation
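
Why keep an fp32 master copy of the weights, as both recipes below do? A minimal illustration of my own with NumPy (not taken from the talk or the docs): fp16 carries only a 10-bit mantissa, so a small weight update can be rounded away entirely when added to an fp16 weight, while the same update survives in fp32.

import numpy as np

w16 = np.float16(1.0)
u16 = np.float16(1e-4)   # a typical small weight update
print(w16 + u16 == w16)  # True: the update is rounded away in fp16

w32 = np.float32(1.0)
u32 = np.float32(1e-4)
print(w32 + u32 == w32)  # False: fp32 keeps the update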

PyTorch Solution

Helper functions

import torch
from torch.autograd import Variable


def prep_param_lists(model):
  """ fp32 copy of model parameters """
  model_params = [p for p in model.parameters() if p.requires_grad]
  master_params = [p.detach().clone().float() for p in model_params]
  
  for p in master_params:
    p.requires_grad = True
    
  return model_params, master_params
  
  
def master_params_to_model_params(model_params, master_params):
  """ copy fp32 master weights to fp16 """
  for model, master in zip(model_params, master_params):
    model.data.copy_(master.data)
    

def model_grads_to_master_grads(model_params, master_params):
  """ copy fp16 gradients to master fp32 """
  for model, master in zip(model_params, master_params):
    if master.grad is None:
      master.grad = Variable(master.data.new(*master.data.size()))
    master.grad.data.copy_(model.grad.data)

Training

N, D_in, D_out = 64, 1024, 512
scale_factor = 128.0
x = Variable(torch.randn(N, D_in)).cuda().half()  # fp16
y = Variable(torch.randn(N, D_out)).cuda().half()  # fp16
model = torch.nn.Linear(D_in, D_out).cuda().half()  # fp16
model_params, master_params = prep_param_lists(model)

optimizer = torch.optim.SGD(master_params, lr=1e-3)

for t in range(500):
  # forward pass (fp16)
  y_pred = model(x)

  # loss is computed in fp32, but the gradients flowing back are fp16
  loss = torch.nn.functional.mse_loss(y_pred.float(), y.float())
  # scaling keeps small gradients representable in fp16, preventing underflow
  scaled_loss = scale_factor * loss.float()
  model.zero_grad()
  # backward pass (fp16 gradients)
  scaled_loss.backward()
  model_grads_to_master_grads(model_params, master_params)
  # remove the scale in fp32 (plus clipping, etc. if desired)
  for param in master_params:
    param.grad.data.mul_(1. / scale_factor)
  optimizer.step()
  master_params_to_model_params(model_params, master_params)
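
Why the scale_factor helps (a small illustration of my own, not from the talk): magnitudes below roughly 6e-8 cannot be represented in fp16 and flush to zero, so tiny gradients vanish during the fp16 backward pass. Multiplying the loss by 128 multiplies every gradient by 128, pulling such values back into range; the scale is then removed in fp32 before the optimizer step, as in the loop above.

import torch

g = torch.tensor(1e-8)     # a small fp32 gradient value
print(g.half())            # tensor(0., dtype=torch.float16): underflows to zero
print((g * 128.0).half())  # non-zero: scaling keeps it representable in fp16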

TensorFlow Solution

Helper functions

import tensorflow as tf


def float32_variable_storage_getter(getter, name, shape=None, dtype=None,
                                    initializer=None, regularizer=None,
                                    trainable=True, *args, **kwargs):
  """ Custom variable getter that forces trainable variables to be stored
      in float32 precision and then casts them to the training precision.
  """
  storage_dtype = tf.float32 if trainable else dtype
  variable = getter(name, shape, dtype=storage_dtype,
                    initializer=initializer, regularizer=regularizer,
                    trainable=trainable, *args, **kwargs)
  if trainable and dtype != tf.float32:
    variable = tf.cast(variable, dtype)
  return variable
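
To sanity-check the getter (a sketch of my own, not from the NVIDIA material): build a layer on fp16 inputs inside a variable scope that uses it, then inspect the trainable variables. They should still be stored as float32 even though the layer itself computes in float16.

x = tf.placeholder(tf.float16, (None, 32))
with tf.variable_scope('check', custom_getter=float32_variable_storage_getter):
  y = tf.layers.dense(x, 8)

for v in tf.trainable_variables():
  print(v.name, v.dtype)  # e.g. check/dense/kernel:0 <dtype: 'float32_ref'>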

Training a simple CNN model

import tensorflow as tf
import numpy as np

def build_forward_model(inputs):
  _, _, h, w = inputs.get_shape().as_list()
  top_layer = inputs
  top_layer = tf.layers.conv2d(top_layer, 64, 7, use_bias=False,
                               data_format='channels_first', padding='SAME')
  top_layer = tf.contrib.layers.batch_norm(top_layer, data_format='NCHW', fused=True)
  top_layer = tf.layers.max_pooling2d(top_layer, 2, 2, data_format='channels_first')
  top_layer = tf.reshape(top_layer, (-1, 64 * (h // 2) * (w // 2)))
  top_layer = tf.layers.dense(top_layer, 128, activation=tf.nn.relu)
  return top_layer
  
  
def build_training_model(inputs, labels, nlabel):
  inputs = tf.cast(inputs, tf.float16)  # fp16
  # all variables created will be stored as fp32 but cast to fp16 for computation
  with tf.variable_scope('fp32_vars', custom_getter=float32_variable_storage_getter):
    top_layer = build_forward_model(inputs)
    logits = tf.layers.dense(top_layer, nlabel, activation=None)
  logits = tf.cast(logits, tf.float32)  # fp32
  loss = tf.losses.sparse_softmax_cross_entropy(logits=logits, labels=labels)
  optimizer = tf.train.MomentumOptimizer(learning_rate=0.01, momentum=0.9)
  loss_scale = 128.0  # Value may need tuning depending on the model
  # gradients and variables are fp32 but computation of gradients is fp16
  # gradvars = optimizer.compute_gradients(loss)
  gradients, variables = zip(*optimizer.compute_gradients(loss * loss_scale))
  gradients = [grad / loss_scale for grad in gradients]
  gradients, _ = tf.clip_by_global_norm(gradients, 5.0)  # clipping
  # train_op = optimizer.apply_gradients(gradvars)
  train_op = optimizer.apply_gradients(zip(gradients, variables))
  return inputs, loss, train_op
  
  
nchan, height, width, nlabel = 3, 224, 224, 1000
inputs = tf.placeholder(tf.float32, (None, nchan, height, width))
labels = tf.placeholder(tf.int32, (None,))
inputs, loss, train_op = build_training_model(inputs, labels, nlabel)
batch_size = 128
sess = tf.Session()
inputs_np = np.random.random(size=(batch_size, nchan, height, width)).astype(np.float32)
labels_np = np.random.randint(nlabel, size=(batch_size,)).astype(np.int32)
sess.run(tf.global_variables_initializer())
for step in range(20):
  loss_np, _ = sess.run([loss, train_op], {inputs: inputs_np, labels: labels_np})
  print("Loss=", loss_np)
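
The fixed loss scale of 128 in both recipes may need tuning, as the comment above notes. A common refinement is dynamic loss scaling: skip the update and halve the scale whenever a scaled gradient overflows to inf/NaN, and raise the scale again after a run of clean steps. Below is a hedged sketch of the idea, written against the PyTorch helpers from earlier because the control flow is plain Python there; the backoff and growth constants are illustrative, not prescribed by the talk.

scale_factor = 128.0
steps_since_overflow = 0

for t in range(500):
  y_pred = model(x)
  loss = torch.nn.functional.mse_loss(y_pred.float(), y.float())
  model.zero_grad()
  (scale_factor * loss).backward()
  model_grads_to_master_grads(model_params, master_params)

  # if any scaled gradient overflowed to inf/NaN, skip this step and back off
  if any(not torch.isfinite(p.grad.data).all() for p in master_params):
    scale_factor = max(scale_factor / 2.0, 1.0)
    steps_since_overflow = 0
    continue

  for p in master_params:
    p.grad.data.mul_(1.0 / scale_factor)  # remove the scale in fp32
  optimizer.step()
  master_params_to_model_params(model_params, master_params)

  steps_since_overflow += 1
  if steps_since_overflow == 1000:  # try a larger scale after a clean run
    scale_factor *= 2.0
    steps_since_overflow = 0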