Trying celerite
import pickle
import numpy as np
from scipy.optimize import minimize
import celerite
from celerite import terms
import emcee
import matplotlib.pyplot as plt
# Ilya: We only need x, y, yerr 1D arrays. Substitute yours.
with open("0716+714_umrao_lc.pkl", "rb") as fo:
    data = pickle.load(fo)
data15 = data[0]
x = data15[:, 0]
y = data15[:, 1]
yerr = data15[:, 2]
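# celerite expects the input coordinates in increasing order; sorting here is
# a small safety step in case the light curve is not already time-ordered.
order = np.argsort(x)
x, y, yerr = x[order], y[order], yerr[order]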
# Set up the GP model
# Ilya: I tried that kernel - it nearly linearly interpolated my points :)
# kernel = terms.RealTerm(log_a=np.log(np.var(y)), log_c=-np.log(10.0))
# Ilya: Need more experiments with this kernel. Q > 1 gives nearly
# periodic oscillations. Q < 1 => PSD more red-noise-like.
# See https://arxiv.org/pdf/1703.09710.pdf
# Q = 1.0 / 3.0
# w0 = 100.0
# S0 = np.var(y) / (w0 * Q)
# bounds = dict(log_S0=(-25, 15), log_Q=(-25, 15), log_omega0=(-15, 15))
# kernel = terms.SHOTerm(log_S0=np.log(S0), log_Q=np.log(Q), log_omega0=np.log(w0),
#                        bounds=bounds)
# kernel.freeze_parameter("log_Q")  # We don't want to fit for "Q" in this term
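# A quick way to compare candidate kernels is to plot their PSDs before
# fitting (sketch using celerite's Term.get_psd; uncomment to try):
# w = np.exp(np.linspace(np.log(0.1), np.log(1000.0), 500))
# for Q in (1.0 / 3.0, 3.0):
#     sho = terms.SHOTerm(log_S0=np.log(np.var(y) / (100.0 * Q)),
#                         log_Q=np.log(Q), log_omega0=np.log(100.0))
#     plt.loglog(w, sho.get_psd(w), label="Q = {0:.2f}".format(Q))
# plt.legend()
# plt.show()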
kernel = terms.Matern32Term(log_sigma=1., log_rho=10.)
# Ilya: Additional white noise that is added to the observed errors
kernel += terms.JitterTerm(-1.6)
gp = celerite.GP(kernel)
gp.compute(x, yerr)
print("Initial log-likelihood: {0}".format(gp.log_likelihood(y)))
def neg_log_like(params, y, gp):
    gp.set_parameter_vector(params)
    return -gp.log_likelihood(y)

def grad_neg_log_like(params, y, gp):
    gp.set_parameter_vector(params)
    return -gp.grad_log_likelihood(y)[1]
# Fit for the maximum likelihood parameters
initial_params = gp.get_parameter_vector()
bounds = gp.get_parameter_bounds()
soln = minimize(neg_log_like, initial_params, jac=grad_neg_log_like,
                method="L-BFGS-B", bounds=bounds, args=(y, gp))
gp.set_parameter_vector(soln.x)
print("Final log-likelihood: {0}".format(-soln.fun))
# Make the maximum likelihood prediction
t = np.linspace(x[0], x[-1], 5000)
mu, var = gp.predict(y, t, return_var=True)
std = np.sqrt(var)
# Plot the data
color = "#ff7f0e"
plt.errorbar(x, y, yerr=yerr, fmt=".k", capsize=0)
plt.plot(t, mu, color=color)
plt.fill_between(t, mu+std, mu-std, color=color, alpha=0.3, edgecolor="none")
plt.ylabel(r"$y$")
plt.xlabel(r"$t$")
plt.gca().yaxis.set_major_locator(plt.MaxNLocator(5))
plt.title("maximum likelihood prediction")
# Sample posterior of hyperparameters with MCMC
def log_probability(params):
    gp.set_parameter_vector(params)
    lp = gp.log_prior()
    if not np.isfinite(lp):
        return -np.inf
    return gp.log_likelihood(y) + lp

initial = np.array(soln.x)
ndim, nwalkers = len(initial), 32
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_probability)
print("Running burn-in...")
p0 = initial + 1e-8 * np.random.randn(nwalkers, ndim)
p0, lp, _ = sampler.run_mcmc(p0, 500)
print("Running production...")
sampler.reset()
sampler.run_mcmc(p0, 2000)
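# Basic sanity check on the sampler (rough rule of thumb): the mean
# acceptance fraction should typically land somewhere around 0.2-0.5.
print("Mean acceptance fraction: {0:.3f}".format(
    np.mean(sampler.acceptance_fraction)))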
# Plot the data.
plt.errorbar(x, y, yerr=yerr, fmt=".k", capsize=0)
# Overplot the gradient of the predicted mean for 100 posterior samples
# (the mean prediction itself is left commented out below).
samples = sampler.flatchain
for s in samples[np.random.randint(len(samples), size=100)]:
    gp.set_parameter_vector(s)
    mu = gp.predict(y, t, return_cov=False)
    # plt.plot(t, mu, color=color, alpha=0.1)
    plt.plot(t, np.gradient(mu), color="r", alpha=0.2)
plt.ylabel(r"$y$")
plt.xlabel(r"$t$")
plt.gca().yaxis.set_major_locator(plt.MaxNLocator(5))
plt.title("posterior predictions")