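# Latent-space optimization loop, apparently for GAN-based image inpainting
# on CelebA (given the masks and pixel blending below): each iteration pushes
# the noise vector z along a discriminator-corrected gradient with an
# adaptive, Barzilai-Borwein-style step size. self.sess and the graph tensors
# (self.gi, self.go, self.di, self.do, self.do_fix, self.gl, self.lc_grad,
# self.d_grad) are assumed to be defined elsewhere in the enclosing class.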
import numpy as np

# State carried across iterations
z_prev = 0
f_grad_prev = 0
v = 0
y_prev = 0
d_grad_prev = 0
generated_prev = 0
d_out_prev = 0

print(f'Using m : {self.config.m}')
for i in range(self.config.nIter):
    in_dict = {
        self.masks: self.masks_data,    # mask
        self.gi: self.z,                # noise sampled
        self.images: self.images_data,  # test set (subset from celeba)
    }
    g_loss, lc_grad, img, d_out, d_grad = self.sess.run(
        [self.gl, self.lc_grad, self.go, self.do_fix, self.d_grad],
        feed_dict=in_dict)
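
    # Composite the images: keep the known pixels from the real image and
    # fill the masked-out region with the generator output, for both the
    # current and previous iterate (the mask is 1 on known pixels, 0 in the hole).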
    x_n = self.images_data * self.masks_data + (1 - self.masks_data) * img
    x_n_prev = self.images_data * self.masks_data + (1 - self.masks_data) * generated_prev
    blended_d_out_prev, = self.sess.run([self.do], feed_dict={self.di: x_n_prev})

    dz = self.z - z_prev
    z_prev = np.copy(self.z)
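
    # The step size computed below, lambda = |dz . df| / ||df||^2, matches
    # the second Barzilai-Borwein step size (up to the absolute value),
    # estimated per sample from successive iterates and gradients.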
    if i >= 1:
        __d_grad = np.expand_dims(d_grad_prev, axis=-1)
        __d_grad_t = __d_grad.transpose(0, 2, 1)
        # I - μ nabla(d) nabla(d)^T / (1 + μ ||nabla(d)||^2)
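        # By the Sherman-Morrison identity,
        #   (I + μ u u^T)^(-1) = I - μ u u^T / (1 + μ ||u||^2),
        # so f_grad below amounts to (I + μ nabla(d) nabla(d)^T)^(-1) f_grad_b,
        # i.e. a rank-one preconditioned gradient step.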
        f_grad_a_fract = np.matmul(__d_grad, __d_grad_t)
        f_grad_a_fract /= (1 + self.config.m * (np.expand_dims(
            np.linalg.norm(d_grad_prev, ord=2, axis=1, keepdims=True),
            axis=-1) ** 2))
        f_grad_a = np.eye(100) - self.config.m * f_grad_a_fract
        # f_grad_b = lc_grad + self.config.m * (blended_d_out_prev - 1) * d_grad_prev
        f_grad_b = lc_grad - self.config.m * (blended_d_out_prev - d_out_prev) * d_grad_prev
        f_grad = np.matmul(f_grad_a, np.expand_dims(f_grad_b, axis=-1))

        f_grad_diff = np.squeeze(f_grad) - np.squeeze(f_grad_prev)
        f_grad_prev = np.copy(f_grad)
        dotprod = np.sum(dz * f_grad_diff, axis=1)
        lambd = np.abs(dotprod) / (np.linalg.norm(f_grad_diff, ord=2, axis=1) ** 2)
        lambd = np.expand_dims(lambd, axis=-1)
        k = lambd * np.squeeze(f_grad)
    else:
        # First iteration: no previous discriminator gradient yet, so use
        # lc_grad directly.
        f_grad_diff = np.squeeze(lc_grad) - np.squeeze(f_grad_prev)
        f_grad_prev = np.copy(lc_grad)
        dotprod = np.sum(dz * f_grad_diff, axis=1)
        lambd = np.abs(dotprod) / (np.linalg.norm(f_grad_diff, ord=2, axis=1) ** 2)
        lambd = np.expand_dims(lambd, axis=-1)
        k = lambd * lc_grad

    d_grad_prev = np.copy(d_grad)
    v_prev = np.copy(v)

    # k = lr * gradient
    self.z -= k

    ### Update momentum and z
    # v = self.config.momentum * v - k
    # self.z += (-self.config.momentum * v_prev +
    #            (1 + self.config.momentum) * v)
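    # (The commented-out lines above are the standard Nesterov-momentum
    # update, v = momentum * v - k; z += -momentum * v_prev + (1 + momentum) * v,
    # left disabled in favor of the plain step self.z -= k.)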

    if self.config.normalizez:
        # Re-standardize each z to zero mean and std sqrt(initialz)
        self.z = np.sqrt(self.config.initialz) * (
            self.z - np.expand_dims(np.mean(self.z, axis=-1), axis=-1)
        ) / np.expand_dims(np.std(self.z, axis=-1), axis=-1)

    d_out_prev = np.copy(d_out)
    generated_prev = np.copy(img)
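

# ---------------------------------------------------------------------------
# Standalone sketch (illustration only, not part of the loop above): the
# per-sample Barzilai-Borwein step size lambda = |dz . df| / ||df||^2 from
# the loop, isolated as a plain numpy function and applied to a toy batched
# quadratic. Function names and the toy problem are assumptions added for
# demonstration.
# ---------------------------------------------------------------------------
def _bb2_step_size(dz, dgrad_diff, eps=1e-12):
    """Per-row |s . y| / ||y||^2 with s = dz, y = dgrad_diff; inputs (B, D)."""
    dot = np.sum(dz * dgrad_diff, axis=1)
    return np.abs(dot) / (np.linalg.norm(dgrad_diff, ord=2, axis=1) ** 2 + eps)


def _demo_bb2(n_iter=50):
    """Minimize f(z) = 0.5 * ||z A^T||^2 per row of a batch with BB2 steps."""
    rng = np.random.default_rng(0)
    A = rng.standard_normal((5, 5))
    H = A.T @ A                          # Hessian, so grad f(z) = z @ H
    z = rng.standard_normal((4, 5))      # batch of 4 latent vectors
    z_prev = np.zeros_like(z)
    g_prev = np.zeros_like(z)
    for i in range(n_iter):
        g = z @ H
        lam = 1e-3 if i == 0 else _bb2_step_size(z - z_prev, g - g_prev)[:, None]
        z_prev, g_prev = z.copy(), g.copy()
        z = z - lam * g
    print('final loss per sample:', 0.5 * np.sum((z @ A.T) ** 2, axis=1))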