import numpy as np
import theano.tensor as tt


# define a theano Op for our likelihood function
class LogLikeWithGrad(tt.Op):

    itypes = [tt.dvector]  # expects a vector of parameter values when called
    otypes = [tt.dscalar]  # outputs a single scalar value (the log-likelihood)

    def __init__(self, loglike):
        """
        Initialise the Op with the log-likelihood function it wraps.

        Parameters
        ----------
        loglike:
            The log-likelihood (or whatever) function we've defined
        """
        # add the likelihood function as a class attribute
        self.likelihood = loglike

        # initialise the gradient Op (below)
        self.logpgrad = LogLikeGrad(self.likelihood)

    def perform(self, node, inputs, outputs):
        # the method that is called when the Op is evaluated
        theta, = inputs  # this will contain our parameter vector

        # call the log-likelihood function
        logl = self.likelihood(theta)

        outputs[0][0] = np.array(logl)  # output the log-likelihood

    def grad(self, inputs, g):
        # the method that calculates the gradients - it actually returns the
        # vector-Jacobian product - g[0] is the gradient of the overall cost
        # with respect to this Op's (scalar) output
        theta, = inputs  # our parameters
        return [g[0] * self.logpgrad(theta)]
class LogLikeGrad(tt.Op):
    """
    This Op will be called with a vector of values and will also return a
    vector of values - the gradient of the log-likelihood in each dimension.
    """

    itypes = [tt.dvector]
    otypes = [tt.dvector]

    def __init__(self, loglike):
        """
        Initialise the Op with the log-likelihood function it wraps.

        Parameters
        ----------
        loglike:
            The log-likelihood (or whatever) function we've defined
        """
        # add the likelihood function as a class attribute
        self.likelihood = loglike

    def perform(self, node, inputs, outputs):
        theta, = inputs

        # define version of likelihood function to pass to derivative function
        def lnlike(values):
            return self.likelihood(values)

        # calculate gradients numerically (see the `gradients` sketch below)
        grads = gradients(theta, lnlike)

        outputs[0][0] = grads
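

# The `gradients` helper called above is not defined in this gist. Below is a
# minimal sketch, assuming a central finite-difference approximation; the
# name and signature follow the call site above, but the step size `eps` is
# an illustrative choice and the original may instead rely on a dedicated
# package such as numdifftools.
def gradients(theta, f, eps=1e-6):
    # df/dtheta_i ~ (f(theta + eps*e_i) - f(theta - eps*e_i)) / (2*eps)
    grads = np.empty_like(theta)
    for i in range(len(theta)):
        step = np.zeros_like(theta)
        step[i] = eps
        grads[i] = (f(theta + step) - f(theta - step)) / (2.0 * eps)
    return grads

# quick sanity check on a toy quadratic, d/dx (x0^2 + x1^2) = 2x:
# gradients(np.array([1.0, 2.0]), lambda th: np.sum(th ** 2))  # ~ [2., 4.]
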
# create our Op, wrapping an externally defined log-likelihood (here the
# `ll` method of an object `n` assumed to be defined elsewhere)
logl = LogLikeWithGrad(n.ll)
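
# A sketch of how an Op like this is typically plugged into a PyMC3 model,
# following the standard "blackbox likelihood" pattern. The parameters `m`
# and `c` and their priors are illustrative assumptions; the actual free
# parameters depend on what n.ll expects.
import pymc3 as pm

with pm.Model():
    m = pm.Uniform('m', lower=-10.0, upper=10.0)
    c = pm.Uniform('c', lower=-10.0, upper=10.0)

    # stack the free parameters into a theano vector and hand it to the Op
    theta = tt.as_tensor_variable([m, c])
    pm.DensityDist('likelihood', lambda v: logl(v), observed={'v': theta})

    # because the Op implements grad(), gradient-based samplers such as
    # NUTS can be used here
    trace = pm.sample(1000, tune=1000)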