pred.cpp
// manual gradient for the prediction model, works with Stan 2.1.0
#define EIGEN_DONT_PARALLELIZE
#include <ctime>    // timespec, clock_gettime (POSIX)
#include <iostream>
#include <limits>
#include <sstream>
#include <utility>
#include <vector>
#include <typeinfo>
#include <boost/exception/all.hpp>
#include <stan/model/prob_grad.hpp>
#include <stan/prob/distributions.hpp>
#include <stan/math/matrix.hpp>
#include <stan/math.hpp>
#include <stan/io/dump.hpp>
#include <stan/io/reader.hpp>
#include <stan/io/writer.hpp>
#include <stan/io/csv_writer.hpp>
#include <unsupported/Eigen/KroneckerProduct>
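
// This file is a hand-edited version of the C++ that stanc emits for the
// prediction model: log_prob_grad() below evaluates the log posterior and its
// gradient analytically in one pass instead of through Stan's reverse-mode
// autodiff.  The templated log_prob() further down preserves the generated
// code path, and the stan::model::log_prob_grad overloads at the bottom of
// the file route sampling through the manual gradient.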
namespace pred_model_namespace {

using namespace std;
using namespace stan::math;
using namespace stan::prob;
using namespace stan::io;
using namespace Eigen;
using stan::math::lgamma;

typedef Eigen::Matrix<double,Eigen::Dynamic,1> vector_d;
typedef Eigen::Matrix<double,1,Eigen::Dynamic> row_vector_d;
typedef Eigen::Matrix<double,Eigen::Dynamic,Eigen::Dynamic> matrix_d;
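
// Data dimensions and fixed inputs: K taxa (W = K - 1 free composition
// components), N grid cells, T time steps, N_knots predictive-process knots,
// N_cores sediment cores.  y holds the pollen counts per core and time,
// d / d_knots / d_inter are cell-cell, knot-knot and cell-knot distance
// matrices, w holds the dispersal weights, and lag the temporal lag matrix.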
class pred_model : public stan::model::prob_grad {
private:
  int K;
  int N;
  int T;
  int N_knots;
  int N_cores;
  vector<vector<int> > y;
  double rho;
  double eta;
  double gamma;
  double psi;
  vector_d phi;
  vector<int> idx_cores;
  matrix_d d;
  matrix_d d_knots;
  matrix_d d_inter;
  matrix_d w;
  matrix_d lag;
  int W;
  vector_d zeros;
  vector_d ones;
  matrix_d Eye_T;
  matrix_d Eye_s;
  matrix_d C_s;
  matrix_d C_s_L;
  matrix_d C_s_inv;
public:
  pred_model(stan::io::var_context& context__,
             std::ostream* pstream__ = 0)
    : prob_grad::prob_grad(0) {
    static const char* function__ = "pred_model_namespace::pred_model(%1%)";
    (void) function__; // dummy call to suppress warning
    size_t pos__;
    (void) pos__; // dummy call to suppress warning
    std::vector<int> vals_i__;
    std::vector<double> vals_r__;
    context__.validate_dims("data initialization", "K", "int", context__.to_vec());
    K = int(0);
    vals_i__ = context__.vals_i("K");
    pos__ = 0;
    K = vals_i__[pos__++];
    context__.validate_dims("data initialization", "N", "int", context__.to_vec());
    N = int(0);
    vals_i__ = context__.vals_i("N");
    pos__ = 0;
    N = vals_i__[pos__++];
    context__.validate_dims("data initialization", "T", "int", context__.to_vec());
    T = int(0);
    vals_i__ = context__.vals_i("T");
    pos__ = 0;
    T = vals_i__[pos__++];
    context__.validate_dims("data initialization", "N_knots", "int", context__.to_vec());
    N_knots = int(0);
    vals_i__ = context__.vals_i("N_knots");
    pos__ = 0;
    N_knots = vals_i__[pos__++];
    context__.validate_dims("data initialization", "N_cores", "int", context__.to_vec());
    N_cores = int(0);
    vals_i__ = context__.vals_i("N_cores");
    pos__ = 0;
    N_cores = vals_i__[pos__++];
    context__.validate_dims("data initialization", "y", "int", context__.to_vec((N_cores * T),K));
    stan::math::validate_non_negative_index("y", "(N_cores * T)", (N_cores * T));
    stan::math::validate_non_negative_index("y", "K", K);
    y = std::vector<std::vector<int> >((N_cores * T),std::vector<int>(K,int(0)));
    vals_i__ = context__.vals_i("y");
    pos__ = 0;
    size_t y_limit_1__ = K;
    for (size_t i_1__ = 0; i_1__ < y_limit_1__; ++i_1__) {
      size_t y_limit_0__ = (N_cores * T);
      for (size_t i_0__ = 0; i_0__ < y_limit_0__; ++i_0__) {
        y[i_0__][i_1__] = vals_i__[pos__++];
      }
    }
    context__.validate_dims("data initialization", "rho", "double", context__.to_vec());
    rho = double(0);
    vals_r__ = context__.vals_r("rho");
    pos__ = 0;
    rho = vals_r__[pos__++];
    context__.validate_dims("data initialization", "eta", "double", context__.to_vec());
    eta = double(0);
    vals_r__ = context__.vals_r("eta");
    pos__ = 0;
    eta = vals_r__[pos__++];
    context__.validate_dims("data initialization", "gamma", "double", context__.to_vec());
    gamma = double(0);
    vals_r__ = context__.vals_r("gamma");
    pos__ = 0;
    gamma = vals_r__[pos__++];
    context__.validate_dims("data initialization", "psi", "double", context__.to_vec());
    psi = double(0);
    vals_r__ = context__.vals_r("psi");
    pos__ = 0;
    psi = vals_r__[pos__++];
    stan::math::validate_non_negative_index("phi", "K", K);
    phi = vector_d(K);
    context__.validate_dims("data initialization", "phi", "vector_d", context__.to_vec(K));
    vals_r__ = context__.vals_r("phi");
    pos__ = 0;
    size_t phi_i_vec_lim__ = K;
    for (size_t i_vec__ = 0; i_vec__ < phi_i_vec_lim__; ++i_vec__) {
      phi[i_vec__] = vals_r__[pos__++];
    }
    context__.validate_dims("data initialization", "idx_cores", "int", context__.to_vec(N_cores));
    stan::math::validate_non_negative_index("idx_cores", "N_cores", N_cores);
    idx_cores = std::vector<int>(N_cores,int(0));
    vals_i__ = context__.vals_i("idx_cores");
    pos__ = 0;
    size_t idx_cores_limit_0__ = N_cores;
    for (size_t i_0__ = 0; i_0__ < idx_cores_limit_0__; ++i_0__) {
      idx_cores[i_0__] = vals_i__[pos__++];
    }
    context__.validate_dims("data initialization", "d", "matrix_d", context__.to_vec(N,N));
    stan::math::validate_non_negative_index("d", "N", N);
    stan::math::validate_non_negative_index("d", "N", N);
    d = matrix_d(N,N);
    vals_r__ = context__.vals_r("d");
    pos__ = 0;
    size_t d_m_mat_lim__ = N;
    size_t d_n_mat_lim__ = N;
    for (size_t n_mat__ = 0; n_mat__ < d_n_mat_lim__; ++n_mat__) {
      for (size_t m_mat__ = 0; m_mat__ < d_m_mat_lim__; ++m_mat__) {
        d(m_mat__,n_mat__) = vals_r__[pos__++];
      }
    }
    context__.validate_dims("data initialization", "d_knots", "matrix_d", context__.to_vec(N_knots,N_knots));
    stan::math::validate_non_negative_index("d_knots", "N_knots", N_knots);
    stan::math::validate_non_negative_index("d_knots", "N_knots", N_knots);
    d_knots = matrix_d(N_knots,N_knots);
    vals_r__ = context__.vals_r("d_knots");
    pos__ = 0;
    size_t d_knots_m_mat_lim__ = N_knots;
    size_t d_knots_n_mat_lim__ = N_knots;
    for (size_t n_mat__ = 0; n_mat__ < d_knots_n_mat_lim__; ++n_mat__) {
      for (size_t m_mat__ = 0; m_mat__ < d_knots_m_mat_lim__; ++m_mat__) {
        d_knots(m_mat__,n_mat__) = vals_r__[pos__++];
      }
    }
    context__.validate_dims("data initialization", "d_inter", "matrix_d", context__.to_vec(N,N_knots));
    stan::math::validate_non_negative_index("d_inter", "N", N);
    stan::math::validate_non_negative_index("d_inter", "N_knots", N_knots);
    d_inter = matrix_d(N,N_knots);
    vals_r__ = context__.vals_r("d_inter");
    pos__ = 0;
    size_t d_inter_m_mat_lim__ = N;
    size_t d_inter_n_mat_lim__ = N_knots;
    for (size_t n_mat__ = 0; n_mat__ < d_inter_n_mat_lim__; ++n_mat__) {
      for (size_t m_mat__ = 0; m_mat__ < d_inter_m_mat_lim__; ++m_mat__) {
        d_inter(m_mat__,n_mat__) = vals_r__[pos__++];
      }
    }
    context__.validate_dims("data initialization", "w", "matrix_d", context__.to_vec(N_cores,N));
    stan::math::validate_non_negative_index("w", "N_cores", N_cores);
    stan::math::validate_non_negative_index("w", "N", N);
    w = matrix_d(N_cores,N);
    vals_r__ = context__.vals_r("w");
    pos__ = 0;
    size_t w_m_mat_lim__ = N_cores;
    size_t w_n_mat_lim__ = N;
    for (size_t n_mat__ = 0; n_mat__ < w_n_mat_lim__; ++n_mat__) {
      for (size_t m_mat__ = 0; m_mat__ < w_m_mat_lim__; ++m_mat__) {
        w(m_mat__,n_mat__) = vals_r__[pos__++];
      }
    }
    context__.validate_dims("data initialization", "lag", "matrix_d", context__.to_vec(T,T));
    stan::math::validate_non_negative_index("lag", "T", T);
    stan::math::validate_non_negative_index("lag", "T", T);
    lag = matrix_d(T,T);
    vals_r__ = context__.vals_r("lag");
    pos__ = 0;
    size_t lag_m_mat_lim__ = T;
    size_t lag_n_mat_lim__ = T;
    for (size_t n_mat__ = 0; n_mat__ < lag_n_mat_lim__; ++n_mat__) {
      for (size_t m_mat__ = 0; m_mat__ < lag_m_mat_lim__; ++m_mat__) {
        lag(m_mat__,n_mat__) = vals_r__[pos__++];
      }
    }
    // validate data
    try {
      check_greater_or_equal(function__,K,0,"K");
    } catch (std::domain_error& e) {
      throw std::domain_error(std::string("Invalid value of K: ") + std::string(e.what()));
    }
    try {
      check_greater_or_equal(function__,N,0,"N");
    } catch (std::domain_error& e) {
      throw std::domain_error(std::string("Invalid value of N: ") + std::string(e.what()));
    }
    try {
      check_greater_or_equal(function__,T,0,"T");
    } catch (std::domain_error& e) {
      throw std::domain_error(std::string("Invalid value of T: ") + std::string(e.what()));
    }
    try {
      check_greater_or_equal(function__,N_knots,0,"N_knots");
    } catch (std::domain_error& e) {
      throw std::domain_error(std::string("Invalid value of N_knots: ") + std::string(e.what()));
    }
    try {
      check_greater_or_equal(function__,N_cores,0,"N_cores");
    } catch (std::domain_error& e) {
      throw std::domain_error(std::string("Invalid value of N_cores: ") + std::string(e.what()));
    }
    try {
      check_greater_or_equal(function__,rho,0,"rho");
    } catch (std::domain_error& e) {
      throw std::domain_error(std::string("Invalid value of rho: ") + std::string(e.what()));
    }
    try {
      check_greater_or_equal(function__,eta,0,"eta");
    } catch (std::domain_error& e) {
      throw std::domain_error(std::string("Invalid value of eta: ") + std::string(e.what()));
    }
    try {
      check_greater_or_equal(function__,gamma,0,"gamma");
      check_less_or_equal(function__,gamma,1,"gamma");
    } catch (std::domain_error& e) {
      throw std::domain_error(std::string("Invalid value of gamma: ") + std::string(e.what()));
    }
    try {
      check_greater_or_equal(function__,psi,0,"psi");
    } catch (std::domain_error& e) {
      throw std::domain_error(std::string("Invalid value of psi: ") + std::string(e.what()));
    }
    try {
      check_greater_or_equal(function__,phi,0,"phi");
    } catch (std::domain_error& e) {
      throw std::domain_error(std::string("Invalid value of phi: ") + std::string(e.what()));
    }
    W = int(0);
    stan::math::validate_non_negative_index("zeros", "(N_knots * T)", (N_knots * T));
    zeros = vector_d((N_knots * T));
    stan::math::validate_non_negative_index("ones", "(N * T)", (N * T));
    ones = vector_d((N * T));
    stan::math::validate_non_negative_index("Eye_T", "T", T);
    stan::math::validate_non_negative_index("Eye_T", "T", T);
    Eye_T = matrix_d(T,T);
    stan::math::validate_non_negative_index("Eye_s", "N_knots", N_knots);
    stan::math::validate_non_negative_index("Eye_s", "N_knots", N_knots);
    Eye_s = matrix_d(N_knots,N_knots);
    stan::math::validate_non_negative_index("C_s", "N_knots", N_knots);
    stan::math::validate_non_negative_index("C_s", "N_knots", N_knots);
    C_s = matrix_d(N_knots,N_knots);
    stan::math::validate_non_negative_index("C_s_L", "N_knots", N_knots);
    stan::math::validate_non_negative_index("C_s_L", "N_knots", N_knots);
    C_s_L = matrix_d(N_knots,N_knots);
    stan::math::validate_non_negative_index("C_s_inv", "N_knots", N_knots);
    stan::math::validate_non_negative_index("C_s_inv", "N_knots", N_knots);
    C_s_inv = matrix_d(N_knots,N_knots);
    stan::math::assign(W, (K - 1));
    for (int i = 1; i <= (N_knots * T); ++i) {
      stan::math::assign(get_base1_lhs(zeros,i,"zeros",1), 0.0);
    }
    for (int i = 1; i <= (N * T); ++i) {
      stan::math::assign(get_base1_lhs(ones,i,"ones",1), 1.0);
    }
    for (int i = 1; i <= T; ++i) {
      for (int j = 1; j <= T; ++j) {
        if (as_bool(logical_eq(i,j))) {
          stan::math::assign(get_base1_lhs(Eye_T,i,j,"Eye_T",1), 1.0);
        } else {
          stan::math::assign(get_base1_lhs(Eye_T,i,j,"Eye_T",1), 0);
        }
      }
    }
    for (int i = 1; i <= N_knots; ++i) {
      for (int j = 1; j <= N_knots; ++j) {
        if (as_bool(logical_eq(i,j))) {
          stan::math::assign(get_base1_lhs(Eye_s,i,j,"Eye_s",1), 1.0);
        } else {
          stan::math::assign(get_base1_lhs(Eye_s,i,j,"Eye_s",1), 0);
        }
      }
    }
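    // Exponential spatial covariance at the knots, C_s(i,j) = exp(-d_knots(i,j) / rho),
    // with its Cholesky factor and inverse.  rho is data, so these are fixed
    // for the whole run and computed once here.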
    stan::math::assign(C_s, exp(divide(stan::math::minus(d_knots),rho)));
    stan::math::assign(C_s_L, cholesky_decompose(C_s));
    stan::math::assign(C_s_inv, transpose(mdivide_right_tri_low(transpose(mdivide_left_tri_low(C_s_L,Eye_s)),C_s_L)));
    // validate transformed data
    // set parameter ranges
    num_params_r__ = 0U;
    param_ranges_i__.clear();
    ++num_params_r__;
    num_params_r__ += W;
    num_params_r__ += (N_knots * T) * W;
  }
  ~pred_model() { }

  void transform_inits(const stan::io::var_context& context__,
                       std::vector<int>& params_i__,
                       std::vector<double>& params_r__) const {
    stan::io::writer<double> writer__(params_r__,params_i__);
    size_t pos__;
    (void) pos__; // dummy call to suppress warning
    std::vector<double> vals_r__;
    std::vector<int> vals_i__;
    if (!(context__.contains_r("tau")))
      throw std::runtime_error("variable tau missing");
    vals_r__ = context__.vals_r("tau");
    pos__ = 0U;
    context__.validate_dims("initialization", "tau", "double", context__.to_vec());
    double tau(0);
    tau = vals_r__[pos__++];
    try {
      writer__.scalar_lub_unconstrain(0,20,tau);
    } catch (std::exception& e) {
      throw std::runtime_error(std::string("Error transforming variable tau: ") + e.what());
    }
    if (!(context__.contains_r("mu")))
      throw std::runtime_error("variable mu missing");
    vals_r__ = context__.vals_r("mu");
    pos__ = 0U;
    context__.validate_dims("initialization", "mu", "vector_d", context__.to_vec(W));
    vector_d mu(W);
    for (int j1__ = 0U; j1__ < W; ++j1__)
      mu(j1__) = vals_r__[pos__++];
    try {
      writer__.vector_unconstrain(mu);
    } catch (std::exception& e) {
      throw std::runtime_error(std::string("Error transforming variable mu: ") + e.what());
    }
    if (!(context__.contains_r("alpha")))
      throw std::runtime_error("variable alpha missing");
    vals_r__ = context__.vals_r("alpha");
    pos__ = 0U;
    context__.validate_dims("initialization", "alpha", "vector_d", context__.to_vec(W,(N_knots * T)));
    std::vector<vector_d> alpha(W,vector_d((N_knots * T)));
    for (int j1__ = 0U; j1__ < (N_knots * T); ++j1__)
      for (int i0__ = 0U; i0__ < W; ++i0__)
        alpha[i0__](j1__) = vals_r__[pos__++];
    for (int i0__ = 0U; i0__ < W; ++i0__) {
      try {
        writer__.vector_unconstrain(alpha[i0__]);
      } catch (std::exception& e) {
        throw std::runtime_error(std::string("Error transforming variable alpha: ") + e.what());
      }
    }
    params_r__ = writer__.data_r();
    params_i__ = writer__.data_i();
  }

  void transform_inits(const stan::io::var_context& context,
                       Eigen::Matrix<double,Eigen::Dynamic,1>& params_r) const {
    std::vector<double> params_r_vec;
    std::vector<int> params_i_vec;
    transform_inits(context, params_i_vec, params_r_vec);
    params_r.resize(params_r_vec.size());
    for (int i = 0; i < params_r.size(); ++i)
      params_r(i) = params_r_vec[i];
  }
  /////////////////////////////////////////////////////////////////////////////
  /////////////////////////////////////////////////////////////////////////////
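
  // lub_transform: map an unconstrained x to (lb, ub) via the scaled inverse
  // logit, returning y = lb + (ub - lb) * inv_logit(x).  On return,
  //   lja = log |dy/dx|  (log-Jacobian adjustment added to lp when jacobian),
  //   ja  = dy/dx        (used to chain d(lp)/dy back to d(lp)/dx),
  //   dj  = d(lja)/dx    (the Jacobian's own gradient contribution).
  // The two branches are the numerically stable forms for x > 0 and x <= 0.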
  inline double lub_transform(const double x, const double lb, const double ub,
                              double &lja, double &ja, double &dj) const
  {
    double inv_logit_x;
    if (x > 0) {
      double exp_minus_x = exp(-x);
      double exp_minus_x_p1 = exp_minus_x + 1.0;
      inv_logit_x = 1.0 / (1.0 + exp_minus_x);
      lja = log(ub - lb) - x - 2 * log1p(exp_minus_x);
      ja = (ub - lb) * exp_minus_x / (exp_minus_x_p1 * exp_minus_x_p1);
      dj = -1.0 + 2.0 * exp_minus_x / (1 + exp_minus_x);
      if ((x < std::numeric_limits<double>::infinity())
          && (inv_logit_x == 1))
        inv_logit_x = 1 - 1e-15;
    } else {
      double exp_x = exp(x);
      double exp_x_p1 = exp_x + 1.0;
      inv_logit_x = 1.0 - 1.0 / (1.0 + exp_x);
      lja = log(ub - lb) + x - 2 * log1p(exp_x);
      ja = (ub - lb) * exp_x / (exp_x_p1 * exp_x_p1);
      dj = -1.0 + 2.0 / (exp_x + 1.0);
      if ((x > -std::numeric_limits<double>::infinity())
          && (inv_logit_x == 0))
        inv_logit_x = 1e-15;
    }
    return lb + (ub - lb) * inv_logit_x;
  }
  // Normal log density, up to additive constants (sigma is fixed for each
  // call, so only the quadratic term matters for lp and the gradient).
  double normal_log_double(const vector_d& y, const double mu, const double sigma) const
  {
    double logp = 0.0;
    double inv_sigma = 1.0/sigma;
    int size_y = y.size();
    for (int n = 0; n < size_y; n++) {
      const double y_minus_mu_over_sigma = (y[n] - mu) * inv_sigma;
      const double y_minus_mu_over_sigma_squared = y_minus_mu_over_sigma * y_minus_mu_over_sigma;
      logp -= 0.5 * y_minus_mu_over_sigma_squared;
    }
    return logp;
  }

  // Multivariate normal log density given the Cholesky factor L of the
  // covariance, dropping the constant -0.5 * n * log(2*pi).
  double multi_normal_cholesky_log_double(const vector_d& y,
                                          const vector_d& mu,
                                          const matrix_d& L) const {
    double lp(0.0);
    vector_d L_log_diag = L.diagonal().array().log().matrix();
    lp -= sum(L_log_diag);
    vector_d y_minus_mu = y.array() - mu.array();
    vector_d half = mdivide_left_tri_low(L, y_minus_mu);
    lp -= 0.5 * dot_self(half);
    return lp;
  }
  /////////////////////////////////////////////////////////////////////////////
  /////////////////////////////////////////////////////////////////////////////
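
  // log_prob_grad: evaluate the log posterior and its full gradient in one
  // pass.  The unconstrained parameter vector packs tau, then mu (W entries),
  // then alpha[k] (N_knots * T entries per k), in the same order as the
  // stan::io::reader unpacks them; the gradient is accumulated into the
  // matching entries of `gradient`.  Each stage is timed via tic/toc.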
  template <bool propto, bool jacobian>
  // double log_prob_grad(const Eigen::Matrix<double, Eigen::Dynamic, 1>& params_r,
  //                      Eigen::Matrix<double, Eigen::Dynamic, 1>& gradient,
  //                      std::ostream* pstream = 0) const {
  double log_prob_grad(Eigen::VectorXd& params_r,
                       Eigen::VectorXd& gradient,
                       std::ostream* pstream = 0) const {
    double lp = 0.0;
    struct timespec tstart, tend, _tic, _toc;
    clock_gettime(CLOCK_MONOTONIC, &tstart);
    double seconds;
#define tic clock_gettime(CLOCK_MONOTONIC, &_tic);
#define toc(msg) clock_gettime(CLOCK_MONOTONIC, &_toc);   \
    seconds = (_toc.tv_sec - _tic.tv_sec)                 \
            + (_toc.tv_nsec - _tic.tv_nsec)*1e-9;         \
    std::cout << " > " << msg << ": "                     \
              << std::scientific << seconds               \
              << " seconds" << std::endl;
    //
    // unpack model parameters and constrain
    //
    vector<double> vec_params_r;
    vector<int> vec_params_i;
    vec_params_r.reserve(params_r.size());
    for (int i = 0; i < params_r.size(); ++i)
      vec_params_r.push_back(params_r(i));
    stan::io::reader<double> in(vec_params_r, vec_params_i);
    double tau, tau_lja, tau_ja, tau_dj;
    tau = lub_transform(in.scalar(), 0.0, 20.0, tau_lja, tau_ja, tau_dj);
    if (jacobian)
      lp += tau_lja;
    vector_d mu = in.vector_constrain(W);
    std::cout << "---> " << tau << " " << mu.norm();
    vector<vector_d> alpha;
    alpha.reserve(W);
    for (int k = 0; k < W; ++k) {
      if (jacobian)
        alpha.push_back(in.vector_constrain((N_knots * T),lp));
      else
        alpha.push_back(in.vector_constrain((N_knots * T)));
      std::cout << " " << alpha[k].norm();
    }
    std::cout << std::endl;
    //
    // compute log probability
    //
    lp += normal_log_double(mu, 0, 2);
    tic;
    const double tau_inv = 1.0 / tau;
    const double eta_inv = 1.0 / eta;
    matrix_d C_t = exp(-tau_inv * lag.array()).matrix();
    for (int j=0; j<C_t.cols(); ++j)
      for (int i=0; i<C_t.rows(); ++i)
        if (fabs(C_t(i,j)) < 1.0e-23)
          C_t(i,j) = 0.0;
    LLT<matrix_d> llt_t = C_t.llt();
    matrix_d C_t_L = llt_t.matrixL();
    matrix_d C_t_inv = llt_t.solve(Eigen::MatrixXd::Identity(T,T));
    LLT<matrix_d> llt_s = C_s.llt();
    matrix_d C_s_L = llt_s.matrixL();
    matrix_d C_s_inv = llt_s.solve(Eigen::MatrixXd::Identity(N_knots,N_knots));
    if (!C_t.allFinite()) throw std::runtime_error("C_t is not finite");
    if (!C_s.allFinite()) throw std::runtime_error("C_s is not finite");
    matrix_d C_star_inv = eta_inv * Eigen::kroneckerProduct(C_s_inv, C_t_inv);
    matrix_d C_star = eta * Eigen::kroneckerProduct(C_s, C_t);
    for (int j=0; j<C_star.cols(); ++j)
      for (int i=0; i<C_star.rows(); ++i)
        if (fabs(C_star(i,j)) < 1.0e-23)
          C_star(i,j) = 0.0;
    matrix_d c_s = exp(- 1.0/rho * d_inter.array()).matrix();
    matrix_d c = eta * Eigen::kroneckerProduct(c_s, C_t);
    for (int j=0; j<c.cols(); ++j)
      for (int i=0; i<c.rows(); ++i)
        if (fabs(c(i,j)) < 1.0e-12)
          c(i,j) = 0.0;
    if (!C_star_inv.allFinite()) throw std::runtime_error("C_star_inv is not finite");
    if (!C_star.allFinite()) throw std::runtime_error("C_star is not finite");
    if (!c_s.allFinite()) throw std::runtime_error("c_s is not finite");
    if (!c.allFinite()) throw std::runtime_error("c is not finite");
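    // Cholesky factor of C_star = eta * kron(C_s, C_t): for SPD A and B,
    // chol(A (x) B) = chol(A) (x) chol(B), so the factor of the large
    // (N_knots*T) x (N_knots*T) covariance is assembled directly from the
    // small factors C_s_L and C_t_L.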
    matrix_d tmp = sqrt(eta) * Eigen::kroneckerProduct(C_s_L, C_t_L);
    matrix_d tmp2 = transpose(tmp);
    TriangularView<matrix_d,Eigen::Lower> C_star_L = tmp.triangularView<Eigen::Lower>();
    TriangularView<matrix_d,Eigen::Upper> C_star_LT = tmp2.triangularView<Eigen::Upper>();
    vector<vector_d> C_star_inv_alpha(W);
    vector<matrix_d> g(W);
    matrix_d exp_g(N*T, W);
    std::vector<double> lp_tmp(W);
    toc("mat ");
    tic;
#pragma omp parallel for
    for (int k=0; k < W; ++k) {
      lp_tmp[k] = multi_normal_cholesky_log_double(alpha[k], zeros, C_star_L);
      C_star_inv_alpha[k] = C_star_LT.solve(C_star_L.solve(alpha[k]));
      g[k] = mu[k] * ones + c * C_star_inv_alpha[k];
      exp_g.col(k) = exp(g[k].array());
    }
    for (int k=0; k < W; ++k)
      lp += lp_tmp[k];
    // if (!exp_g.allFinite()) throw std::runtime_error("exp_g is not finite");
    if (!exp_g.allFinite()) std::cout << "exp_g is not finite" << std::endl;
    vector_d sum_exp_g = exp_g.rowwise().sum();
    matrix_d r(N*T,K);
    for (int k = 0; k < W; ++k)
      for (int i = 0; i < N*T; ++i)
        r(i,k) = exp_g(i,k) / (1. + sum_exp_g(i));
    for (int i = 0; i < N*T; ++i)
      r(i,W) = 1. / (1. + sum_exp_g(i));
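    // Pollen dispersal: each core's composition mixes the local proportions
    // (weight gamma) with a normalized, dispersal-weighted sum of the
    // proportions over the other grid cells (weight 1 - gamma, weights w).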
    matrix_d r_new(N_cores * T, K);
    matrix_d out_sum(N_cores*T,K);
    vector_d sum_w(N*T);
    int idx_core;
    out_sum.fill(0);
    for (int i = 0; i < N_cores; ++i) {
      for (int t = 0; t < T; ++t) {
        idx_core = (idx_cores[i] - 1) * T + t;
        r_new.row(i * T + t) = gamma * r.row(idx_core);
        for (int j = 0; j < N; ++j) {
          if (d(idx_cores[i]-1,j) > 0) {
            out_sum.row(i * T + t) += w(i,j) * r.row(j * T + t);
          }
        }
        sum_w[i*T+t] = out_sum.row(i * T + t).sum();
        r_new.row(i * T + t) += out_sum.row(i * T + t) * (1 - gamma) / sum_w[i*T+t];
      }
    }
toc("exp "); | |
tic; | |
// if (!r_new.allFinite()) throw std::runtime_error("r_new is not finite"); | |
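    // Dirichlet-multinomial log likelihood for the counts y: with
    // kappa = phi .* r_new and A = sum(kappa),
    //   log p(y | kappa) = lgamma(n + 1) + lgamma(A) - lgamma(n + A)
    //     + sum_k [ lgamma(y_k + kappa_k) - lgamma(kappa_k) - lgamma(y_k + 1) ]
    // where n is the row sum of y; rows with no grains contribute nothing.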
    vector<int> N_grains(N_cores*T);
    vector_d A(N_cores*T);
    matrix_d kappa(N_cores*T,K);
    vector<int> y_row_sum(N_cores*T);
    for (int i = 0; i < N_cores * T; ++i) {
      y_row_sum[i] = 0;
      for (int k = 0; k < K; ++k)
        y_row_sum[i] += y[i][k];
      if (y_row_sum[i] > 0) {
        for (int k = 0; k < K; ++k) {
          kappa(i,k) = phi(k) * r_new(i,k);
        }
        A[i] = kappa.row(i).sum();
        N_grains[i] = y_row_sum[i];
        lp += lgamma(N_grains[i] + 1.0) + lgamma(A[i]) - lgamma(N_grains[i] + A[i]);
        for (int k = 0; k < K; ++k) {
          lp += -lgamma(y[i][k] + 1) + lgamma(y[i][k] + kappa(i,k)) - lgamma(kappa(i,k));
        }
      }
    }
    // if (!kappa.allFinite()) throw std::runtime_error("kappa is not finite");
    toc("kap ");
    tic;
    //
    // compute gradient
    //
    gradient.fill(0.0);
    double tausq_inv = 1 / (tau * tau);
    MatrixXd eyeNknots = MatrixXd::Identity(N_knots, N_knots);
    // partials of mvn
    // precompute these outside the k loop
    matrix_d a1 = eta * C_s;
    matrix_d a2 = tausq_inv * (lag.array() * C_t.array()).matrix();
    matrix_d a3 = - C_t_inv * a2 * C_t_inv;
    matrix_d a4 = kroneckerProduct(C_s_inv, a3);
    gradient[0] += - W * 0.5 * (C_star_inv * kroneckerProduct(a1, a2)).trace();
    toc("g01 ");
    tic;
    for (int k=0; k<W; ++k) {
      vector_d v = alpha[k];
      row_vector_d vT = alpha[k].transpose();
      gradient[0] += -0.5 * eta_inv * vT * (a4 * v);
      for (int j=0; j<N_knots; ++j)
        for (int t=0; t<T; ++t) {
          int idx = 1 + W + k*N_knots*T + j*T + t;
          gradient[idx] -= C_star_inv_alpha[k][j*T+t];
        }
    }
    // chain the tau gradient back through the (0, 20) bound transform:
    // d lp/dx = (d lp/d tau) * ja + dj
    gradient[0] = gradient[0] * tau_ja + tau_dj;
    toc("g02 ");
    tic;
    // const matrix_d dg_mat = c * C_star_inv;
    matrix_d dg_mat = (C_star_LT.solve(C_star_L.solve(c.transpose()))).transpose();
    if (!dg_mat.allFinite()) std::cout << "dg_mat is not finite" << std::endl;
    std::cout << "...> " << c.norm() << " " << c.maxCoeff() << " " << c.minCoeff() << std::endl;
    toc("dg ");
    tic;
    // partial of dirmult
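    // Chain rule for the Dirichlet-multinomial terms: for each core i, time t
    // and taxon m, the kappa partial w.r.t. the k-th latent field factors as
    //   dkappa/dg_{ctk} = phi_m * (dr_new_{itm}/dr_{ctm'}) * (dr_{ctm'}/dg_{ctk}),
    // summed over cells c and taxa m'.  The same products, scaled by the
    // digamma terms, are cached per (c, t) and later contracted against
    // dg_mat = c * C_star_inv to get the alpha gradient; the mu gradient uses
    // the unscaled sum because dg/dmu_k = 1.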
#pragma omp parallel for
    for (int k=0; k<W; ++k) {
      matrix_d cache(N*T,N_cores*T);
      cache.fill(0.0);
      for (int t=0; t<T; ++t) {
        for (int i=0; i<N_cores; ++i) {
          int si = i*T + t;
          int ci = (idx_cores[i]-1)*T + t;
          if (y_row_sum[si] > 0) {
            double dirmultp1 = -digamma(A[si] + N_grains[si]) + digamma(A[si]);
            double invsumw2 = 1 / (sum_w[si] * sum_w[si]);
            for (int m=0; m<K; ++m) {
              double dirmultp2 = digamma(y[si][m] + kappa(si,m)) - digamma(kappa(si,m));
              // this accumulates dkappa_itm
              double dkappa = 0.0;
              double drnew, dr, dg;
              for (int c=0; c<N; ++c) {
                int C = c*T + t;
                const double sumgp1 = 1 + sum_exp_g[C];
                const double sumgp1inv2 = 1 / (sumgp1*sumgp1);
                double tmp22 = 0.0;
                for (int mp=0; mp<K; ++mp) {
                  // compute drnew = \partial r^{new}_{itm} / \partial r_{ctm'}
                  if (mp == m) {
                    drnew = (idx_cores[i]-1 == c) ? gamma : (1-gamma) * (w(i,c) * sum_w[si] - w(i,c) * out_sum(si,m)) * invsumw2;
                  } else {
                    drnew = (idx_cores[i]-1 == c) ? 0.0 : (1-gamma) * (-w(i,c) * out_sum(si,m)) * invsumw2;
                  }
                  // compute dr = \partial r_{ctm'} / \partial g_{ctk}
                  if (mp == K-1) {
                    dr = -exp_g(C,k) * sumgp1inv2;
                  } else if (mp == k) {
                    dr = exp_g(C,mp) * (sumgp1 - exp_g(C,mp)) * sumgp1inv2;
                  } else {
                    dr = -exp_g(C,mp) * exp_g(C,k) * sumgp1inv2;
                  }
                  // accumulate dkappa
                  dkappa += phi[m] * drnew * dr;
                  // save (dirmultp1 + dirmultp2) * phi[m] * drnew * dr
                  cache(t*N+c,si) += (dirmultp1 + dirmultp2) * phi[m] * drnew * dr;
                } // mp
              } // c
              gradient[k+1] += dirmultp1 * dkappa;
              gradient[k+1] += dirmultp2 * dkappa;
            } // m
          } // if
        } // i
      } // t
      for (int t=0; t<T; ++t)
        for (int i=0; i<N_cores; ++i)
          for (int v=0; v<N_knots; ++v) {
            int idx = K + k*N_knots*T + v*T + t;
            for (int c=0; c<N; ++c)
              // XXX: dg_mat trashes the l1 read cache
              gradient[idx] += cache(t*N+c,i*T+t) * dg_mat(c*T+t,v*T+t);
          }
      gradient[k+1] -= 0.25 * mu[k]; // d/dmu_k of the N(0,2) prior term
    } // k
    toc("grad");
    clock_gettime(CLOCK_MONOTONIC, &tend);
    seconds = (tend.tv_sec - tstart.tv_sec)
            + (tend.tv_nsec - tstart.tv_nsec)*1e-9;
    std::cout << "===> log_prob_grad took: "
              << std::scientific << seconds << " seconds" << std::endl;
    return lp;
  } // log_prob_grad()
  template <bool propto, bool jacobian>
  double log_prob(vector<double>& params_r,
                  vector<int>& params_i,
                  std::ostream* pstream = 0) const {
    Eigen::VectorXd evec_params_r(params_r.size());
    Eigen::VectorXd evec_gradient(params_r.size());
    for (int i=0; i<params_r.size(); i++) evec_params_r[i] = params_r[i];
    double lp = log_prob_grad<propto, jacobian>(evec_params_r, evec_gradient, pstream);
    return lp;
  }
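
  // The fully templated log_prob below follows the stanc-generated pattern
  // (note that its Dirichlet-multinomial terms are commented out); plain
  // double-precision evaluations go through log_prob_grad via the overload
  // above.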
  template <bool propto, bool jacobian, typename T>
  T log_prob(Eigen::Matrix<T,Eigen::Dynamic,1>& params_r,
             std::ostream* pstream = 0) const {
    std::vector<T> vec_params_r;
    vec_params_r.reserve(params_r.size());
    for (int i = 0; i < params_r.size(); ++i)
      vec_params_r.push_back(params_r(i));
    std::vector<int> vec_params_i;
    return log_prob<propto,jacobian,T>(vec_params_r, vec_params_i, pstream);
  }
  template <bool propto__, bool jacobian__, typename T__>
  T__ log_prob(vector<T__>& params_r__,
               vector<int>& params_i__,
               std::ostream* pstream__ = 0) const {
    T__ DUMMY_VAR__(std::numeric_limits<double>::quiet_NaN());
    (void) DUMMY_VAR__; // suppress unused var warning
    T__ lp__(0.0);
    stan::math::accumulator<T__> lp_accum__;
    // model parameters
    stan::io::reader<T__> in__(params_r__,params_i__);
    T__ tau;
    (void) tau; // dummy to suppress unused var warning
    if (jacobian__)
      tau = in__.scalar_lub_constrain(0,20,lp__);
    else
      tau = in__.scalar_lub_constrain(0,20);
    Eigen::Matrix<T__,Eigen::Dynamic,1> mu;
    (void) mu; // dummy to suppress unused var warning
    if (jacobian__)
      mu = in__.vector_constrain(W,lp__);
    else
      mu = in__.vector_constrain(W);
    vector<Eigen::Matrix<T__,Eigen::Dynamic,1> > alpha;
    size_t dim_alpha_0__ = W;
    alpha.reserve(dim_alpha_0__);
    for (size_t k_0__ = 0; k_0__ < dim_alpha_0__; ++k_0__) {
      if (jacobian__)
        alpha.push_back(in__.vector_constrain((N_knots * T),lp__));
      else
        alpha.push_back(in__.vector_constrain((N_knots * T)));
    }
    // transformed parameters
    // initialized transformed params to avoid seg fault on val access
    // validate transformed parameters
    const char* function__ = "validate transformed params %1%";
    (void) function__; // dummy to suppress unused var warning
    struct timespec tstart, tend;
    clock_gettime(CLOCK_MONOTONIC, &tstart);
    {
      Eigen::Matrix<T__,Eigen::Dynamic,Eigen::Dynamic> C_t(T,T);
      (void) C_t; // dummy to suppress unused var warning
      stan::math::fill(C_t,DUMMY_VAR__);
      vector<Eigen::Matrix<T__,Eigen::Dynamic,1> > g(W, (Eigen::Matrix<T__,Eigen::Dynamic,1> ((N * T))));
      stan::math::fill(g,DUMMY_VAR__);
      Eigen::Matrix<T__,Eigen::Dynamic,1> sum_exp_g((N * T));
      (void) sum_exp_g; // dummy to suppress unused var warning
      stan::math::fill(sum_exp_g,DUMMY_VAR__);
      vector<Eigen::Matrix<T__,Eigen::Dynamic,1> > r((N * T), (Eigen::Matrix<T__,Eigen::Dynamic,1> (K)));
      stan::math::fill(r,DUMMY_VAR__);
      Eigen::Matrix<T__,Eigen::Dynamic,Eigen::Dynamic> C_t_inv(T,T);
      (void) C_t_inv; // dummy to suppress unused var warning
      stan::math::fill(C_t_inv,DUMMY_VAR__);
      Eigen::Matrix<T__,Eigen::Dynamic,Eigen::Dynamic> C_t_L(T,T);
      (void) C_t_L; // dummy to suppress unused var warning
      stan::math::fill(C_t_L,DUMMY_VAR__);
      Eigen::Matrix<T__,Eigen::Dynamic,Eigen::Dynamic> C_star_inv((N_knots * T),(N_knots * T));
      (void) C_star_inv; // dummy to suppress unused var warning
      stan::math::fill(C_star_inv,DUMMY_VAR__);
      Eigen::Matrix<T__,Eigen::Dynamic,Eigen::Dynamic> c((N * T),(N_knots * T));
      (void) c; // dummy to suppress unused var warning
      stan::math::fill(c,DUMMY_VAR__);
      Eigen::Matrix<T__,Eigen::Dynamic,1> H_alpha((N * T));
      (void) H_alpha; // dummy to suppress unused var warning
      stan::math::fill(H_alpha,DUMMY_VAR__);
      stan::math::initialize(C_t, DUMMY_VAR__);
      stan::math::initialize(g, DUMMY_VAR__);
      stan::math::initialize(sum_exp_g, DUMMY_VAR__);
      stan::math::initialize(r, DUMMY_VAR__);
      stan::math::initialize(C_t_inv, DUMMY_VAR__);
      stan::math::initialize(C_t_L, DUMMY_VAR__);
      stan::math::initialize(C_star_inv, DUMMY_VAR__);
      stan::math::initialize(c, DUMMY_VAR__);
      stan::math::initialize(H_alpha, DUMMY_VAR__);
      lp_accum__.add(uniform_log<propto__>(tau, 0, 20));
      lp_accum__.add(normal_log<propto__>(mu, 0, 2));
      stan::math::assign(C_t, exp(divide(stan::math::minus(lag),tau)));
      stan::math::assign(C_t_L, cholesky_decompose(C_t));
      stan::math::assign(C_t_inv, transpose(mdivide_right_tri_low(transpose(mdivide_left_tri_low(C_t_L,Eye_T)),C_t_L)));
      for (int s = 1; s <= N_knots; ++s) {
        for (int u = 1; u <= N_knots; ++u) {
          {
            T__ tmp;
            (void) tmp; // dummy to suppress unused var warning
            stan::math::initialize(tmp, DUMMY_VAR__);
            stan::math::assign(tmp, ((1 / eta) * get_base1(C_s_inv,s,u,"C_s_inv",1)));
            for (int t = 1; t <= T; ++t) {
              for (int v = 1; v <= T; ++v) {
                stan::math::assign(get_base1_lhs(C_star_inv,(((s - 1) * T) + t),(((u - 1) * T) + v),"C_star_inv",1), (tmp * get_base1(C_t_inv,t,v,"C_t_inv",1)));
              }
            }
          }
        }
      }
      for (int s = 1; s <= N; ++s) {
        for (int u = 1; u <= N_knots; ++u) {
          {
            T__ tmp;
            (void) tmp; // dummy to suppress unused var warning
            stan::math::initialize(tmp, DUMMY_VAR__);
            stan::math::assign(tmp, (eta * exp((-(get_base1(d_inter,s,u,"d_inter",1)) / rho))));
            for (int t = 1; t <= T; ++t) {
              for (int v = 1; v <= T; ++v) {
                stan::math::assign(get_base1_lhs(c,(((s - 1) * T) + t),(((u - 1) * T) + v),"c",1), (tmp * get_base1(C_t,t,v,"C_t",1)));
              }
            }
          }
        }
      }
      for (int k = 1; k <= W; ++k) {
        lp_accum__.add(multi_normal_prec_log<propto__>(get_base1(alpha,k,"alpha",1), zeros, C_star_inv));
        stan::math::assign(get_base1_lhs(g,k,"g",1), add(multiply(get_base1(mu,k,"mu",1),ones),multiply(c,multiply(C_star_inv,get_base1(alpha,k,"alpha",1)))));
      }
      for (int i = 1; i <= (N * T); ++i) {
        stan::math::assign(get_base1_lhs(sum_exp_g,i,"sum_exp_g",1), 0.0);
        for (int k = 1; k <= W; ++k) {
          stan::math::assign(get_base1_lhs(sum_exp_g,i,"sum_exp_g",1), (get_base1(sum_exp_g,i,"sum_exp_g",1) + exp(get_base1(get_base1(g,k,"g",1),i,"g",2))));
        }
      }
      if (pstream__) {
        stan_print(pstream__,"sum_exp_g :");
        stan_print(pstream__,get_base1(sum_exp_g,1,"sum_exp_g",1));
        *pstream__ << std::endl;
      }
      for (int k = 1; k <= W; ++k) {
        for (int i = 1; i <= (N * T); ++i) {
          stan::math::assign(get_base1_lhs(get_base1_lhs(r,i,"r",1),k,"r",2), (exp(get_base1(get_base1(g,k,"g",1),i,"g",2)) / (1 + get_base1(sum_exp_g,i,"sum_exp_g",1))));
        }
      }
      for (int i = 1; i <= (N * T); ++i) {
        stan::math::assign(get_base1_lhs(get_base1_lhs(r,i,"r",1),K,"r",2), (1 / (1 + get_base1(sum_exp_g,i,"sum_exp_g",1))));
      }
      {
        vector<Eigen::Matrix<T__,Eigen::Dynamic,1> > r_new((N_cores * T), (Eigen::Matrix<T__,Eigen::Dynamic,1> (K)));
        stan::math::fill(r_new,DUMMY_VAR__);
        Eigen::Matrix<T__,Eigen::Dynamic,1> out_sum(K);
        (void) out_sum; // dummy to suppress unused var warning
        stan::math::fill(out_sum,DUMMY_VAR__);
        T__ sum_w;
        (void) sum_w; // dummy to suppress unused var warning
        int idx_core(0);
        (void) idx_core; // dummy to suppress unused var warning
        stan::math::initialize(r_new, DUMMY_VAR__);
        stan::math::initialize(out_sum, DUMMY_VAR__);
        stan::math::initialize(sum_w, DUMMY_VAR__);
        for (int i = 1; i <= N_cores; ++i) {
          for (int t = 1; t <= T; ++t) {
            stan::math::assign(idx_core, (((get_base1(idx_cores,i,"idx_cores",1) - 1) * T) + t));
            stan::math::assign(get_base1_lhs(r_new,(((i - 1) * T) + t),"r_new",1), multiply(gamma,get_base1(r,idx_core,"r",1)));
            for (int k = 1; k <= K; ++k) {
              stan::math::assign(get_base1_lhs(out_sum,k,"out_sum",1), 0);
            }
            stan::math::assign(sum_w, 0);
            for (int j = 1; j <= N; ++j) {
              if (as_bool(logical_gt(get_base1(d,get_base1(idx_cores,i,"idx_cores",1),j,"d",1),0))) {
                stan::math::assign(out_sum, add(out_sum,multiply(get_base1(w,i,j,"w",1),get_base1(r,(((j - 1) * T) + t),"r",1))));
              }
            }
            stan::math::assign(sum_w, sum(out_sum));
            stan::math::assign(get_base1_lhs(r_new,(((i - 1) * T) + t),"r_new",1), add(get_base1(r_new,(((i - 1) * T) + t),"r_new",1),divide(multiply(out_sum,(1 - gamma)),sum_w)));
          }
        }
        {
          T__ N_grains;
          (void) N_grains; // dummy to suppress unused var warning
          T__ A;
          (void) A; // dummy to suppress unused var warning
          Eigen::Matrix<T__,Eigen::Dynamic,1> kappa(K);
          (void) kappa; // dummy to suppress unused var warning
          stan::math::fill(kappa,DUMMY_VAR__);
          stan::math::initialize(N_grains, DUMMY_VAR__);
          stan::math::initialize(A, DUMMY_VAR__);
          stan::math::initialize(kappa, DUMMY_VAR__);
          for (int i = 1; i <= (N_cores * T); ++i) {
            if (as_bool(logical_gt(sum(get_base1(y,i,"y",1)),0))) {
              stan::math::assign(kappa, elt_multiply(phi,get_base1(r_new,i,"r_new",1)));
              stan::math::assign(A, sum(kappa));
              stan::math::assign(N_grains, sum(get_base1(y,i,"y",1)));
              // lp_accum__.add(((stan::math::lgamma((N_grains + 1)) + stan::math::lgamma(A)) - stan::math::lgamma((N_grains + A))));
              // for (int k = 1; k <= K; ++k) {
              //   lp_accum__.add(((-(stan::math::lgamma((get_base1(get_base1(y,i,"y",1),k,"y",2) + 1))) + stan::math::lgamma((get_base1(get_base1(y,i,"y",1),k,"y",2) + get_base1(kappa,k,"kappa",1)))) - stan::math::lgamma(get_base1(kappa,k,"kappa",1))));
              // }
            }
          }
        }
      }
    }
    lp_accum__.add(lp__);
    clock_gettime(CLOCK_MONOTONIC, &tend);
    double seconds = (tend.tv_sec - tstart.tv_sec)
                   + (tend.tv_nsec - tstart.tv_nsec)*1e-9;
    std::cout << "===> log_prob took: " << std::scientific << seconds << " seconds" << std::endl;
    return lp_accum__.sum();
  } // log_prob()
  void get_param_names(std::vector<std::string>& names__) const {
    names__.resize(0);
    names__.push_back("tau");
    names__.push_back("mu");
    names__.push_back("alpha");
  }

  void get_dims(std::vector<std::vector<size_t> >& dimss__) const {
    dimss__.resize(0);
    std::vector<size_t> dims__;
    dims__.resize(0);
    dimss__.push_back(dims__);
    dims__.resize(0);
    dims__.push_back(W);
    dimss__.push_back(dims__);
    dims__.resize(0);
    dims__.push_back(W);
    dims__.push_back((N_knots * T));
    dimss__.push_back(dims__);
  }
  template <typename RNG>
  void write_array(RNG& base_rng__,
                   std::vector<double>& params_r__,
                   std::vector<int>& params_i__,
                   std::vector<double>& vars__,
                   bool include_tparams__ = true,
                   bool include_gqs__ = true,
                   std::ostream* pstream__ = 0) const {
    vars__.resize(0);
    stan::io::reader<double> in__(params_r__,params_i__);
    static const char* function__ = "pred_model_namespace::write_array(%1%)";
    (void) function__; // dummy call to suppress warning
    // read-transform, write parameters
    double tau = in__.scalar_lub_constrain(0,20);
    vector_d mu = in__.vector_constrain(W);
    vector<vector_d> alpha;
    size_t dim_alpha_0__ = W;
    for (size_t k_0__ = 0; k_0__ < dim_alpha_0__; ++k_0__) {
      alpha.push_back(in__.vector_constrain((N_knots * T)));
    }
    vars__.push_back(tau);
    for (int k_0__ = 0; k_0__ < W; ++k_0__) {
      vars__.push_back(mu[k_0__]);
    }
    for (int k_1__ = 0; k_1__ < (N_knots * T); ++k_1__) {
      for (int k_0__ = 0; k_0__ < W; ++k_0__) {
        vars__.push_back(alpha[k_0__][k_1__]);
      }
    }
    if (!include_tparams__) return;
    // declare and define transformed parameters
    double lp__ = 0.0;
    (void) lp__; // dummy call to suppress warning
    stan::math::accumulator<double> lp_accum__;
    //
    // compute log probability
    //
    // validate transformed parameters
    // write transformed parameters
    if (!include_gqs__) return;
    // declare and define generated quantities
    // validate generated quantities
    // write generated quantities
  }
  template <typename RNG>
  void write_array(RNG& base_rng,
                   Eigen::Matrix<double,Eigen::Dynamic,1>& params_r,
                   Eigen::Matrix<double,Eigen::Dynamic,1>& vars,
                   bool include_tparams = true,
                   bool include_gqs = true,
                   std::ostream* pstream = 0) const {
    std::vector<double> params_r_vec(params_r.size());
    for (int i = 0; i < params_r.size(); ++i)
      params_r_vec[i] = params_r(i);
    std::vector<double> vars_vec;
    std::vector<int> params_i_vec;
    write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream);
    vars.resize(vars_vec.size());
    for (int i = 0; i < vars.size(); ++i)
      vars(i) = vars_vec[i];
  }

  void write_csv_header(std::ostream& o__) const {
    stan::io::csv_writer writer__(o__);
    writer__.comma();
    o__ << "tau";
    for (int k_0__ = 1; k_0__ <= W; ++k_0__) {
      writer__.comma();
      o__ << "mu" << '.' << k_0__;
    }
    for (int k_1__ = 1; k_1__ <= (N_knots * T); ++k_1__) {
      for (int k_0__ = 1; k_0__ <= W; ++k_0__) {
        writer__.comma();
        o__ << "alpha" << '.' << k_0__ << '.' << k_1__;
      }
    }
    writer__.newline();
  }
  template <typename RNG>
  void write_csv(RNG& base_rng__,
                 std::vector<double>& params_r__,
                 std::vector<int>& params_i__,
                 std::ostream& o__,
                 std::ostream* pstream__ = 0) const {
    stan::io::reader<double> in__(params_r__,params_i__);
    stan::io::csv_writer writer__(o__);
    static const char* function__ = "pred_model_namespace::write_csv(%1%)";
    (void) function__; // dummy call to suppress warning
    // read-transform, write parameters
    double tau = in__.scalar_lub_constrain(0,20);
    writer__.write(tau);
    vector_d mu = in__.vector_constrain(W);
    writer__.write(mu);
    vector<vector_d> alpha;
    size_t dim_alpha_0__ = W;
    for (size_t k_0__ = 0; k_0__ < dim_alpha_0__; ++k_0__) {
      alpha.push_back(in__.vector_constrain((N_knots * T)));
      writer__.write(alpha[k_0__]);
    }
    // declare, define and validate transformed parameters
    double lp__ = 0.0;
    (void) lp__; // dummy call to suppress warning
    stan::math::accumulator<double> lp_accum__;
    // write transformed parameters
    // declare and define generated quantities
    // validate generated quantities
    // write generated quantities
    writer__.newline();
  }
  template <typename RNG>
  void write_csv(RNG& base_rng,
                 Eigen::Matrix<double,Eigen::Dynamic,1>& params_r,
                 std::ostream& o,
                 std::ostream* pstream = 0) const {
    std::vector<double> params_r_vec(params_r.size());
    for (int i = 0; i < params_r.size(); ++i)
      params_r_vec[i] = params_r(i);
    std::vector<int> params_i_vec; // dummy
    write_csv(base_rng, params_r_vec, params_i_vec, o, pstream);
  }

  static std::string model_name() {
    return "pred_model";
  }

  void constrained_param_names(std::vector<std::string>& param_names__,
                               bool include_tparams__ = true,
                               bool include_gqs__ = true) const {
    std::stringstream param_name_stream__;
    param_name_stream__.str(std::string());
    param_name_stream__ << "tau";
    param_names__.push_back(param_name_stream__.str());
    for (int k_0__ = 1; k_0__ <= W; ++k_0__) {
      param_name_stream__.str(std::string());
      param_name_stream__ << "mu" << '.' << k_0__;
      param_names__.push_back(param_name_stream__.str());
    }
    for (int k_1__ = 1; k_1__ <= (N_knots * T); ++k_1__) {
      for (int k_0__ = 1; k_0__ <= W; ++k_0__) {
        param_name_stream__.str(std::string());
        param_name_stream__ << "alpha" << '.' << k_0__ << '.' << k_1__;
        param_names__.push_back(param_name_stream__.str());
      }
    }
    if (!include_gqs__ && !include_tparams__) return;
    if (!include_gqs__) return;
  }

  void unconstrained_param_names(std::vector<std::string>& param_names__,
                                 bool include_tparams__ = true,
                                 bool include_gqs__ = true) const {
    std::stringstream param_name_stream__;
    param_name_stream__.str(std::string());
    param_name_stream__ << "tau";
    param_names__.push_back(param_name_stream__.str());
    for (int k_0__ = 1; k_0__ <= W; ++k_0__) {
      param_name_stream__.str(std::string());
      param_name_stream__ << "mu" << '.' << k_0__;
      param_names__.push_back(param_name_stream__.str());
    }
    for (int k_1__ = 1; k_1__ <= (N_knots * T); ++k_1__) {
      for (int k_0__ = 1; k_0__ <= W; ++k_0__) {
        param_name_stream__.str(std::string());
        param_name_stream__ << "alpha" << '.' << k_0__ << '.' << k_1__;
        param_names__.push_back(param_name_stream__.str());
      }
    }
    if (!include_gqs__ && !include_tparams__) return;
    if (!include_gqs__) return;
  }

}; // model

} // namespace

namespace stan {
namespace model {

template <bool propto, bool jacobian_adjust_transform>
double log_prob_grad(const pred_model_namespace::pred_model& model,
                     Eigen::VectorXd& params_r,
                     Eigen::VectorXd& gradient,
                     std::ostream* msgs = 0) {
  double lp = model.template log_prob_grad<propto, jacobian_adjust_transform>(params_r, gradient, msgs);
  return lp;
}

template <bool propto, bool jacobian_adjust_transform>
double log_prob_grad(const pred_model_namespace::pred_model& model,
                     std::vector<double>& params_r,
                     std::vector<int>& params_i,
                     std::vector<double>& gradient,
                     std::ostream* msgs = 0) {
  Eigen::VectorXd evec_params_r(params_r.size());
  Eigen::VectorXd evec_gradient(params_r.size());
  for (int i=0; i<params_r.size(); i++) evec_params_r[i] = params_r[i];
  double lp = model.template log_prob_grad<propto, jacobian_adjust_transform>(evec_params_r, evec_gradient, msgs);
  gradient.resize(params_r.size());
  for (int i=0; i<params_r.size(); i++) gradient[i] = evec_gradient[i];
  return lp;
}

}
}
#include <stan/gm/command.hpp>

int main(int argc, const char* argv[]) {
  try {
    return stan::gm::command<pred_model_namespace::pred_model>(argc,argv);
  } catch (std::exception& e) {
    std::cerr << std::endl << "Exception: " << e.what() << std::endl;
    std::cerr << "Diagnostic information: " << std::endl << boost::diagnostic_information(e) << std::endl;
    return -1;
  }
} |