tf KL divergence
Created: April 20, 2020
import numpy as np
import tensorflow as tf  # TF 1.x; tf.distributions was removed in TF 2.x

# A batch of univariate Gaussians; the parameter shapes broadcast:
# m1 has shape (3, 2, 1), the other parameters have shape (3, 1, 1).
m1 = np.array([[[0.0], [1.1]], [[0.1], [1.2]], [[0.2], [1.3]]])
s1 = np.array([[[1.1]], [[1.2]], [[1.3]]])
m2 = np.array([[[1.0]], [[1.1]], [[1.2]]])
s2 = np.array([[[0.1]], [[0.2]], [[0.3]]])
print(m1.shape)  # (3, 2, 1)

nd_1 = tf.distributions.Normal(loc=m1, scale=s1)
nd_2 = tf.distributions.Normal(loc=m2, scale=s2)

# Element-wise KL(nd_1 || nd_2); can be added to a loss function to minimize the KL.
KL = tf.distributions.kl_divergence(nd_1, nd_2)

with tf.Session() as sess:
    print(sess.run(KL))  # shape (3, 2, 1) after broadcasting
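For two univariate Gaussians the KL divergence has a closed form, KL(N(m1, s1) || N(m2, s2)) = log(s2/s1) + (s1^2 + (m1 - m2)^2) / (2 * s2^2) - 1/2. As a minimal sketch, the NumPy helper below (kl_normal is our own name, not a TF function) evaluates that formula with the same broadcasting and should reproduce the TF result element-wise:

import numpy as np

def kl_normal(m1, s1, m2, s2):
    # Closed-form KL(N(m1, s1) || N(m2, s2)) for univariate Gaussians,
    # applied element-wise with NumPy broadcasting.
    return np.log(s2 / s1) + (s1 ** 2 + (m1 - m2) ** 2) / (2.0 * s2 ** 2) - 0.5

m1 = np.array([[[0.0], [1.1]], [[0.1], [1.2]], [[0.2], [1.3]]])
s1 = np.array([[[1.1]], [[1.2]], [[1.3]]])
m2 = np.array([[[1.0]], [[1.1]], [[1.2]]])
s2 = np.array([[[0.1]], [[0.2]], [[0.3]]])

print(kl_normal(m1, s1, m2, s2))  # matches sess.run(KL) above, shape (3, 2, 1)

Note that on TF 2.x the same distributions live in TensorFlow Probability, as tfp.distributions.Normal and tfp.distributions.kl_divergence.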