Created
May 31, 2019 15:49
-
-
Save seantalts/e2da343fea48c645b3e38060993a5044 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
// Code generated by Stan version 2.19.1 | |
#include <stan/model/model_header.hpp> | |
namespace low_dim_gauss_mix_model_namespace { | |
using std::istream; | |
using std::string; | |
using std::stringstream; | |
using std::vector; | |
using stan::io::dump; | |
using stan::math::lgamma; | |
using stan::model::prob_grad; | |
using namespace stan::math; | |
static int current_statement_begin__; | |
// Builds the source map used to translate generated-code statement numbers
// back into lines of low_dim_gauss_mix.stan when reporting errors.
stan::io::program_reader prog_reader__() {
    stan::io::program_reader src_map;
    src_map.add_event(0, 0, "start", "low_dim_gauss_mix.stan");
    src_map.add_event(22, 20, "end", "low_dim_gauss_mix.stan");
    return src_map;
}
class low_dim_gauss_mix_model : public prob_grad { | |
private: | |
int N; | |
vector_d y; | |
public: | |
low_dim_gauss_mix_model(stan::io::var_context& context__, | |
std::ostream* pstream__ = 0) | |
: prob_grad(0) { | |
ctor_body(context__, 0, pstream__); | |
} | |
low_dim_gauss_mix_model(stan::io::var_context& context__, | |
unsigned int random_seed__, | |
std::ostream* pstream__ = 0) | |
: prob_grad(0) { | |
ctor_body(context__, random_seed__, pstream__); | |
} | |
// Shared constructor body: reads and validates the data block variables from
// context__ and computes num_params_r__, the size of the unconstrained
// parameter vector.
void ctor_body(stan::io::var_context& context__,
               unsigned int random_seed__,
               std::ostream* pstream__) {
    typedef double local_scalar_t__;
    boost::ecuyer1988 base_rng__ =
        stan::services::util::create_rng(random_seed__, 0);
    (void) base_rng__;  // suppress unused var warning
    current_statement_begin__ = -1;
    static const char* function__ = "low_dim_gauss_mix_model_namespace::low_dim_gauss_mix_model";
    (void) function__;  // dummy to suppress unused var warning
    size_t pos__;
    (void) pos__;  // dummy to suppress unused var warning
    std::vector<int> vals_i__;
    std::vector<double> vals_r__;
    local_scalar_t__ DUMMY_VAR__(std::numeric_limits<double>::quiet_NaN());
    (void) DUMMY_VAR__;  // suppress unused var warning
    try {
        // data: int<lower=0> N;
        current_statement_begin__ = 2;
        context__.validate_dims("data initialization", "N", "int", context__.to_vec());
        N = int(0);
        vals_i__ = context__.vals_i("N");
        pos__ = 0;
        N = vals_i__[pos__++];
        check_greater_or_equal(function__, "N", N, 0);
        // data: vector[N] y;
        current_statement_begin__ = 3;
        validate_non_negative_index("y", "N", N);
        context__.validate_dims("data initialization", "y", "vector_d", context__.to_vec(N));
        y = Eigen::Matrix<double, Eigen::Dynamic, 1>(N);
        vals_r__ = context__.vals_r("y");
        pos__ = 0;
        for (size_t idx__ = 0; idx__ < static_cast<size_t>(N); ++idx__)
            y(idx__) = vals_r__[pos__++];
        // no transformed data in this model

        // parameter sizing: mu (ordered[2]) + sigma (2 x real<lower=0>)
        // + theta (real<lower=0,upper=1>) => 5 unconstrained dimensions
        num_params_r__ = 0U;
        param_ranges_i__.clear();
        current_statement_begin__ = 7;
        validate_non_negative_index("mu", "2", 2);
        num_params_r__ += 2;
        current_statement_begin__ = 8;
        validate_non_negative_index("sigma", "2", 2);
        num_params_r__ += (1 * 2);
        current_statement_begin__ = 9;
        num_params_r__ += 1;
    } catch (const std::exception& e) {
        stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__());
        // Next line prevents compiler griping about no return
        throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***");
    }
}
// No resources beyond members; defaulted destructor is equivalent.
~low_dim_gauss_mix_model() = default;
// Reads constrained initial values for mu, sigma, and theta from context__,
// maps each to its unconstrained scale, and writes the result into
// params_r__ (params_i__ receives integer parameters; this model has none).
void transform_inits(const stan::io::var_context& context__,
                     std::vector<int>& params_i__,
                     std::vector<double>& params_r__,
                     std::ostream* pstream__) const {
    typedef double local_scalar_t__;
    stan::io::writer<double> writer__(params_r__, params_i__);
    size_t pos__;
    (void) pos__;  // dummy to suppress warning
    std::vector<double> vals_r__;
    std::vector<int> vals_i__;

    // mu: ordered[2]
    current_statement_begin__ = 7;
    if (!(context__.contains_r("mu")))
        stan::lang::rethrow_located(std::runtime_error(std::string("Variable mu missing")), current_statement_begin__, prog_reader__());
    vals_r__ = context__.vals_r("mu");
    pos__ = 0U;
    validate_non_negative_index("mu", "2", 2);
    context__.validate_dims("parameter initialization", "mu", "vector_d", context__.to_vec(2));
    Eigen::Matrix<double, Eigen::Dynamic, 1> mu(2);
    for (size_t i__ = 0; i__ < 2; ++i__)
        mu(i__) = vals_r__[pos__++];
    try {
        writer__.ordered_unconstrain(mu);
    } catch (const std::exception& e) {
        stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable mu: ") + e.what()), current_statement_begin__, prog_reader__());
    }

    // sigma: array of 2 real<lower=0>
    current_statement_begin__ = 8;
    if (!(context__.contains_r("sigma")))
        stan::lang::rethrow_located(std::runtime_error(std::string("Variable sigma missing")), current_statement_begin__, prog_reader__());
    vals_r__ = context__.vals_r("sigma");
    pos__ = 0U;
    validate_non_negative_index("sigma", "2", 2);
    context__.validate_dims("parameter initialization", "sigma", "double", context__.to_vec(2));
    std::vector<double> sigma(2, double(0));
    for (size_t i__ = 0; i__ < 2; ++i__)
        sigma[i__] = vals_r__[pos__++];
    for (size_t i__ = 0; i__ < 2; ++i__) {
        try {
            writer__.scalar_lb_unconstrain(0, sigma[i__]);
        } catch (const std::exception& e) {
            stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()), current_statement_begin__, prog_reader__());
        }
    }

    // theta: real<lower=0, upper=1>
    current_statement_begin__ = 9;
    if (!(context__.contains_r("theta")))
        stan::lang::rethrow_located(std::runtime_error(std::string("Variable theta missing")), current_statement_begin__, prog_reader__());
    vals_r__ = context__.vals_r("theta");
    pos__ = 0U;
    context__.validate_dims("parameter initialization", "theta", "double", context__.to_vec());
    double theta(0);
    theta = vals_r__[pos__++];
    try {
        writer__.scalar_lub_unconstrain(0, 1, theta);
    } catch (const std::exception& e) {
        stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable theta: ") + e.what()), current_statement_begin__, prog_reader__());
    }

    params_r__ = writer__.data_r();
    params_i__ = writer__.data_i();
}
// Eigen-vector overload: delegates to the std::vector overload and copies
// the unconstrained values into params_r.
void transform_inits(const stan::io::var_context& context,
                     Eigen::Matrix<double, Eigen::Dynamic, 1>& params_r,
                     std::ostream* pstream__) const {
    std::vector<double> params_r_vec;
    std::vector<int> params_i_vec;
    transform_inits(context, params_i_vec, params_r_vec, pstream__);
    params_r = Eigen::Map<const Eigen::Matrix<double, Eigen::Dynamic, 1> >(
        params_r_vec.data(), params_r_vec.size());
}
template <bool propto__, bool jacobian__, typename T__> | |
T__ log_prob(std::vector<T__>& params_r__, | |
std::vector<int>& params_i__, | |
std::ostream* pstream__ = 0) const { | |
typedef T__ local_scalar_t__; | |
local_scalar_t__ DUMMY_VAR__(std::numeric_limits<double>::quiet_NaN()); | |
(void) DUMMY_VAR__; // dummy to suppress unused var warning | |
T__ lp__(0.0); | |
stan::math::accumulator<T__> lp_accum__; | |
try { | |
stan::io::reader<local_scalar_t__> in__(params_r__, params_i__); | |
// model parameters | |
current_statement_begin__ = 7; | |
Eigen::Matrix<local_scalar_t__, Eigen::Dynamic, 1> mu; | |
(void) mu; // dummy to suppress unused var warning | |
if (jacobian__) | |
mu = in__.ordered_constrain(2, lp__); | |
else | |
mu = in__.ordered_constrain(2); | |
current_statement_begin__ = 8; | |
std::vector<local_scalar_t__> sigma; | |
size_t sigma_d_0_max__ = 2; | |
sigma.reserve(sigma_d_0_max__); | |
for (size_t d_0__ = 0; d_0__ < sigma_d_0_max__; ++d_0__) { | |
if (jacobian__) | |
sigma.push_back(in__.scalar_lb_constrain(0, lp__)); | |
else | |
sigma.push_back(in__.scalar_lb_constrain(0)); | |
} | |
current_statement_begin__ = 9; | |
local_scalar_t__ theta; | |
(void) theta; // dummy to suppress unused var warning | |
if (jacobian__) | |
theta = in__.scalar_lub_constrain(0, 1, lp__); | |
else | |
theta = in__.scalar_lub_constrain(0, 1); | |
// model body | |
current_statement_begin__ = 13; | |
lp_accum__.add(normal_log<propto__>(sigma, 0, 2)); | |
current_statement_begin__ = 14; | |
lp_accum__.add(normal_log<propto__>(mu, 0, 2)); | |
current_statement_begin__ = 15; | |
lp_accum__.add(beta_log<propto__>(theta, 5, 5)); | |
current_statement_begin__ = 16; | |
for (int n = 1; n <= N; ++n) { | |
current_statement_begin__ = 17; | |
lp_accum__.add(log_mix(theta, normal_log(get_base1(y, n, "y", 1), get_base1(mu, 1, "mu", 1), get_base1(sigma, 1, "sigma", 1)), normal_log(get_base1(y, n, "y", 1), get_base1(mu, 2, "mu", 1), get_base1(sigma, 2, "sigma", 1)))); | |
} | |
} catch (const std::exception& e) { | |
stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); | |
// Next line prevents compiler griping about no return | |
throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); | |
} | |
lp_accum__.add(lp__); | |
return lp_accum__.sum(); | |
} // log_prob() | |
template <bool propto, bool jacobian, typename T_> | |
T_ log_prob(Eigen::Matrix<T_,Eigen::Dynamic,1>& params_r, | |
std::ostream* pstream = 0) const { | |
std::vector<T_> vec_params_r; | |
vec_params_r.reserve(params_r.size()); | |
for (int i = 0; i < params_r.size(); ++i) | |
vec_params_r.push_back(params_r(i)); | |
std::vector<int> vec_params_i; | |
return log_prob<propto,jacobian,T_>(vec_params_r, vec_params_i, pstream); | |
} | |
// Fills names__ with the declared parameter names, in declaration order.
void get_param_names(std::vector<std::string>& names__) const {
    names__.assign({"mu", "sigma", "theta"});
}
// Fills dimss__ with one dimension list per parameter, in declaration order:
// mu is a length-2 vector, sigma a length-2 array, theta a scalar.
void get_dims(std::vector<std::vector<size_t> >& dimss__) const {
    dimss__.assign({ std::vector<size_t>{2},
                     std::vector<size_t>{2},
                     std::vector<size_t>{} });
}
template <typename RNG> | |
void write_array(RNG& base_rng__, | |
std::vector<double>& params_r__, | |
std::vector<int>& params_i__, | |
std::vector<double>& vars__, | |
bool include_tparams__ = true, | |
bool include_gqs__ = true, | |
std::ostream* pstream__ = 0) const { | |
typedef double local_scalar_t__; | |
vars__.resize(0); | |
stan::io::reader<local_scalar_t__> in__(params_r__, params_i__); | |
static const char* function__ = "low_dim_gauss_mix_model_namespace::write_array"; | |
(void) function__; // dummy to suppress unused var warning | |
// read-transform, write parameters | |
Eigen::Matrix<double, Eigen::Dynamic, 1> mu = in__.ordered_constrain(2); | |
size_t mu_j_1_max__ = 2; | |
for (size_t j_1__ = 0; j_1__ < mu_j_1_max__; ++j_1__) { | |
vars__.push_back(mu(j_1__)); | |
} | |
std::vector<double> sigma; | |
size_t sigma_d_0_max__ = 2; | |
sigma.reserve(sigma_d_0_max__); | |
for (size_t d_0__ = 0; d_0__ < sigma_d_0_max__; ++d_0__) { | |
sigma.push_back(in__.scalar_lb_constrain(0)); | |
} | |
size_t sigma_k_0_max__ = 2; | |
for (size_t k_0__ = 0; k_0__ < sigma_k_0_max__; ++k_0__) { | |
vars__.push_back(sigma[k_0__]); | |
} | |
double theta = in__.scalar_lub_constrain(0, 1); | |
vars__.push_back(theta); | |
double lp__ = 0.0; | |
(void) lp__; // dummy to suppress unused var warning | |
stan::math::accumulator<double> lp_accum__; | |
local_scalar_t__ DUMMY_VAR__(std::numeric_limits<double>::quiet_NaN()); | |
(void) DUMMY_VAR__; // suppress unused var warning | |
if (!include_tparams__ && !include_gqs__) return; | |
try { | |
if (!include_gqs__ && !include_tparams__) return; | |
if (!include_gqs__) return; | |
} catch (const std::exception& e) { | |
stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); | |
// Next line prevents compiler griping about no return | |
throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); | |
} | |
} | |
template <typename RNG> | |
void write_array(RNG& base_rng, | |
Eigen::Matrix<double,Eigen::Dynamic,1>& params_r, | |
Eigen::Matrix<double,Eigen::Dynamic,1>& vars, | |
bool include_tparams = true, | |
bool include_gqs = true, | |
std::ostream* pstream = 0) const { | |
std::vector<double> params_r_vec(params_r.size()); | |
for (int i = 0; i < params_r.size(); ++i) | |
params_r_vec[i] = params_r(i); | |
std::vector<double> vars_vec; | |
std::vector<int> params_i_vec; | |
write_array(base_rng, params_r_vec, params_i_vec, vars_vec, include_tparams, include_gqs, pstream); | |
vars.resize(vars_vec.size()); | |
for (int i = 0; i < vars.size(); ++i) | |
vars(i) = vars_vec[i]; | |
} | |
// Name of the model, reported by the Stan interfaces.
static std::string model_name() {
    return std::string("low_dim_gauss_mix_model");
}
void constrained_param_names(std::vector<std::string>& param_names__, | |
bool include_tparams__ = true, | |
bool include_gqs__ = true) const { | |
std::stringstream param_name_stream__; | |
size_t mu_j_1_max__ = 2; | |
for (size_t j_1__ = 0; j_1__ < mu_j_1_max__; ++j_1__) { | |
param_name_stream__.str(std::string()); | |
param_name_stream__ << "mu" << '.' << j_1__ + 1; | |
param_names__.push_back(param_name_stream__.str()); | |
} | |
size_t sigma_k_0_max__ = 2; | |
for (size_t k_0__ = 0; k_0__ < sigma_k_0_max__; ++k_0__) { | |
param_name_stream__.str(std::string()); | |
param_name_stream__ << "sigma" << '.' << k_0__ + 1; | |
param_names__.push_back(param_name_stream__.str()); | |
} | |
param_name_stream__.str(std::string()); | |
param_name_stream__ << "theta"; | |
param_names__.push_back(param_name_stream__.str()); | |
if (!include_gqs__ && !include_tparams__) return; | |
if (include_tparams__) { | |
} | |
if (!include_gqs__) return; | |
} | |
void unconstrained_param_names(std::vector<std::string>& param_names__, | |
bool include_tparams__ = true, | |
bool include_gqs__ = true) const { | |
std::stringstream param_name_stream__; | |
size_t mu_j_1_max__ = 2; | |
for (size_t j_1__ = 0; j_1__ < mu_j_1_max__; ++j_1__) { | |
param_name_stream__.str(std::string()); | |
param_name_stream__ << "mu" << '.' << j_1__ + 1; | |
param_names__.push_back(param_name_stream__.str()); | |
} | |
size_t sigma_k_0_max__ = 2; | |
for (size_t k_0__ = 0; k_0__ < sigma_k_0_max__; ++k_0__) { | |
param_name_stream__.str(std::string()); | |
param_name_stream__ << "sigma" << '.' << k_0__ + 1; | |
param_names__.push_back(param_name_stream__.str()); | |
} | |
param_name_stream__.str(std::string()); | |
param_name_stream__ << "theta"; | |
param_names__.push_back(param_name_stream__.str()); | |
if (!include_gqs__ && !include_tparams__) return; | |
if (include_tparams__) { | |
} | |
if (!include_gqs__) return; | |
} | |
}; // model | |
} // namespace | |
// Alias required by the Stan services/interfaces entry points.
using stan_model = low_dim_gauss_mix_model_namespace::low_dim_gauss_mix_model;
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment