# Gist by @alaiacano, created April 1, 2013 12:26
import numpy as np


def entropy(x):
    """Shannon entropy H(X) = -sum_p p * log2(p), in bits."""
    return -1. * sum(p * np.log2(p) for p in x if p > 0)
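
# A quick sanity check (added, not part of the original gist): a fair
# coin carries exactly 1 bit of entropy, and a certain outcome 0 bits.
assert np.isclose(entropy([0.5, 0.5]), 1.0)
assert np.isclose(entropy([1.0]), 0.0)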

def conditional_entropy(x, axis=0):
    """Conditional entropy H(Y|X) = sum_x p(x) * H(Y|X=x), in bits.

    With axis=0 the conditioning variable X indexes the columns of x;
    with axis=1 it indexes the rows.
    """
    if axis not in (0, 1):
        raise ValueError("axis must be 0 or 1")
    rows, cols = np.shape(x)
    # marginal probability p(x)
    px = np.sum(x, axis)
    px = px / np.sum(px)
    h = 0.0
    # iterate over the values of the conditioning variable
    for c in range(cols if axis == 0 else rows):
        # p(y|X=x)
        if axis == 0:
            py_x = x[:, c] / px[c]
        else:
            py_x = x[c, :] / px[c]
        # H(Y|X=x), weighted by p(x)
        h += px[c] * entropy(py_x)
    return h
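
# Cross-check (added, not part of the original gist): the same quantity
# can be computed directly from the joint distribution via
# H(Y|X) = -sum_{x,y} p(x, y) * log2( p(x, y) / p(x) ).
def conditional_entropy_direct(joint):
    # Hypothetical helper; assumes the columns of `joint` index X,
    # matching conditional_entropy(joint, axis=0).
    px = np.sum(joint, 0)
    return -sum(joint[i, j] * np.log2(joint[i, j] / px[j])
                for i in range(joint.shape[0])
                for j in range(joint.shape[1])
                if joint[i, j] > 0)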

# Example: a (symmetric) joint distribution over two variables X and Y.
x = np.array([
    [0., 10., 4., 2.],
    [10., 0., 1., 7.],
    [4., 1., 0., 1.],
    [2., 7., 1., 0.],
])
x = x / np.sum(x)  # normalize so the entries sum to 1

# marginals p(x) and p(y)
px = np.sum(x, 1)
py = np.sum(x, 0)

print(x)
# I(X;Y) = H(Y) - H(Y|X)
print("mutual information:", entropy(py) - conditional_entropy(x.transpose()))

# Example: an independent joint distribution, built as the outer product
# of two marginals, for which the mutual information should be zero.
p1 = np.array([[.1, .3, .6]])
p2 = np.array([[.3, .2, .5]])
d = np.dot(p1.transpose(), p2)  # outer product: p(x, y) = p1(x) * p2(y)

px = np.sum(d, 1)
py = np.sum(d, 0)

print("independent distributions")
print(d)
print("mutual information:", entropy(px) - conditional_entropy(d))