Generate Stan code for an occupancy model

flocker_stancode(
  f_occ = NULL,
  f_det,
  flocker_data,
  data2 = NULL,
  multiseason = NULL,
  f_col = NULL,
  f_ex = NULL,
  multi_init = NULL,
  f_auto = NULL,
  augmented = FALSE,
  threads = NULL,
  ...
)

Arguments

f_occ

A brms-type model formula for occupancy. If provided, must begin with "~".

f_det

A brms-type model formula for detection; must begin with "~". Alternatively, a brmsformula object including formulas for all of the relevant distributional parameters in the desired model (det and at least one of occ, colo, ex, autologistic, and Omega). In that case, the $formula element of the brmsformula must be the detection formula, beginning with det ~. This latter option is inadvisable except when necessary (e.g. when a nonlinear formula is desired), as input checking is less thorough; a sketch of this usage appears at the end of the Examples section.

flocker_data

Data, generally the output of make_flocker_data().

data2

Additional data (e.g. a covariance matrix for a phylogenetic effect).

multiseason

Must be NULL (the default) or one of "colex" or "autologistic". If NULL, the data must be formatted for a single-season model; otherwise, the data must be formatted for a multiseason model. If "colex", a colonization-extinction model will be fit, and `f_col` and `f_ex` must be specified (see the sketch following this argument list). If "autologistic", an autologistic model will be fit, and `f_col` and `f_ex` must both be NULL.

f_col

A brms-type model formula for colonization in colonization-extinction dynamic models. If provided, must begin with "~".

f_ex

A brms-type model formula for extinction probabilities in colonization-extinction dynamic models. If provided, must begin with "~".

multi_init

Must be NULL unless the model is a dynamic (multiseason) model, in which case must be either "explicit" or "equilibrium". If "explicit", then `f_occ` must be provided to model occupancy probabilities in the first timestep. If "equilibrium", then `f_occ` must be `NULL` and the initial occupancy probabilities are assumed to be the (possibly site-specific) equilibrium probabilities from the colonization-extinction dynamics.

f_auto

Relevant only for autologistic models. A brms-type model formula for the autologistic offset parameter (theta). If provided, must begin with "~".

augmented

Logical. Must be TRUE if data are formatted for a data-augmented multi-species model, and FALSE otherwise.

threads

NULL or a positive integer. If an integer, the number of threads to use per chain in within-chain parallelization. Currently available only with single-season rep-constant models, and must be set to NULL otherwise. A sketch illustrating this constraint appears at the end of the Examples section.

...

Additional arguments passed to brms::brm().
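
For orientation, the sketch below shows how these arguments might combine in a hypothetical multiseason colonization-extinction call. The object fd_multi stands in for the output of make_flocker_data() formatted for a multiseason model, and the covariate names uc1 and ec1 are illustrative; treat this as a sketch of the interface rather than a checked example.

flocker_stancode(
  f_occ = ~ uc1,        # initial-timestep occupancy; required because multi_init = "explicit"
  f_det = ~ uc1 + ec1,  # detection
  f_col = ~ uc1,        # colonization
  f_ex = ~ uc1,         # extinction
  flocker_data = fd_multi,
  multiseason = "colex",
  multi_init = "explicit"
)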

Value

The generated Stan code.

Examples

# \donttest{
sfd <- simulate_flocker_data()
fd <- make_flocker_data(
 sfd$obs, 
 sfd$unit_covs,
 sfd$event_covs
)
#> Formatting data for a single-season occupancy model. For details, see make_flocker_data_static. All warnings and error messages should be interpreted in the context of make_flocker_data_static
flocker_stancode(
  f_occ = ~ s(uc1) + (1|species),
  f_det = ~ uc1 + ec1 + (1|species),
  flocker_data = fd,
  refresh = 50, chains = 1, warmup = 5, iter = 200,
  control = list(adapt_engaged = FALSE, stepsize = .05, max_treedepth = 5),
  seed = 123
  )
#> // generated with brms 2.20.4
#> functions {
#>     real occupancy_single_lpmf(
#>     array[] int y, // detection data
#>     vector mu, // lin pred for detection
#>     vector occ, // lin pred for occupancy. Elements after vint1[1] irrelevant.
#>     array[] int vint1, // n units (n_unit). Elements after 1 irrelevant.
#>     array[] int vint2, // n sampling events per unit (n_rep). Elements after vint1[1] irrelevant.
#>     array[] int vint3, // Indicator for > 0 detections (Q). Elements after vint1[1] irrelevant.
#>   
#>   // indices for jth repeated sampling event to each unit (elements after vint1[1] irrelevant):
#>     array[] int vint4,
#>     array[] int vint5,
#>     array[] int vint6,
#>     array[] int vint7
#> ) {
#>   // Create array of the rep indices that correspond to each unit.
#>     array[vint1[1], 4] int index_array;
#>       index_array[,1] = vint4[1:vint1[1]];
#>       index_array[,2] = vint5[1:vint1[1]];
#>       index_array[,3] = vint6[1:vint1[1]];
#>       index_array[,4] = vint7[1:vint1[1]];
#> 
#>   // Initialize and compute log-likelihood
#>     real lp = 0;
#>     for (i in 1:vint1[1]) {
#>       array[vint2[i]] int indices = index_array[i, 1:vint2[i]];
#>       if (vint3[i] == 1) {
#>         lp += bernoulli_logit_lpmf(1 | occ[i]);
#>         lp += bernoulli_logit_lpmf(y[indices] | mu[indices]);
#>       }
#>       if (vint3[i] == 0) {
#>         lp += log_sum_exp(bernoulli_logit_lpmf(1 | occ[i]) + 
#>                               sum(log1m_inv_logit(mu[indices])), bernoulli_logit_lpmf(0 | occ[i]));
#>       }
#>     }
#>     return(lp);
#>   }
#> 
#> }
#> data {
#>   int<lower=1> N;  // total number of observations
#>   array[N] int Y;  // response variable
#>   // data for custom integer vectors
#>   array[N] int vint1;
#>   // data for custom integer vectors
#>   array[N] int vint2;
#>   // data for custom integer vectors
#>   array[N] int vint3;
#>   // data for custom integer vectors
#>   array[N] int vint4;
#>   // data for custom integer vectors
#>   array[N] int vint5;
#>   // data for custom integer vectors
#>   array[N] int vint6;
#>   // data for custom integer vectors
#>   array[N] int vint7;
#>   int<lower=1> K;  // number of population-level effects
#>   matrix[N, K] X;  // population-level design matrix
#>   int<lower=1> Kc;  // number of population-level effects after centering
#>   // data for splines
#>   int Ks_occ;  // number of linear effects
#>   matrix[N, Ks_occ] Xs_occ;  // design matrix for the linear effects
#>   // data for spline 1
#>   int nb_occ_1;  // number of bases
#>   array[nb_occ_1] int knots_occ_1;  // number of knots
#>   // basis function matrices
#>   matrix[N, knots_occ_1[1]] Zs_occ_1_1;
#>   // data for group-level effects of ID 1
#>   int<lower=1> N_1;  // number of grouping levels
#>   int<lower=1> M_1;  // number of coefficients per level
#>   array[N] int<lower=1> J_1;  // grouping indicator per observation
#>   // group-level predictor values
#>   vector[N] Z_1_1;
#>   // data for group-level effects of ID 2
#>   int<lower=1> N_2;  // number of grouping levels
#>   int<lower=1> M_2;  // number of coefficients per level
#>   array[N] int<lower=1> J_2;  // grouping indicator per observation
#>   // group-level predictor values
#>   vector[N] Z_2_occ_1;
#>   int prior_only;  // should the likelihood be ignored?
#> }
#> transformed data {
#>   matrix[N, Kc] Xc;  // centered version of X without an intercept
#>   vector[Kc] means_X;  // column means of X before centering
#>   for (i in 2:K) {
#>     means_X[i - 1] = mean(X[, i]);
#>     Xc[, i - 1] = X[, i] - means_X[i - 1];
#>   }
#> }
#> parameters {
#>   vector[Kc] b;  // regression coefficients
#>   real Intercept;  // temporary intercept for centered predictors
#>   real Intercept_occ;  // temporary intercept for centered predictors
#>   vector[Ks_occ] bs_occ;  // unpenalized spline coefficients
#>   // parameters for spline 1
#>   // standardized penalized spline coefficients
#>   vector[knots_occ_1[1]] zs_occ_1_1;
#>   vector<lower=0>[nb_occ_1] sds_occ_1;  // SDs of penalized spline coefficients
#>   vector<lower=0>[M_1] sd_1;  // group-level standard deviations
#>   array[M_1] vector[N_1] z_1;  // standardized group-level effects
#>   vector<lower=0>[M_2] sd_2;  // group-level standard deviations
#>   array[M_2] vector[N_2] z_2;  // standardized group-level effects
#> }
#> transformed parameters {
#>   // penalized spline coefficients
#>   vector[knots_occ_1[1]] s_occ_1_1;
#>   vector[N_1] r_1_1;  // actual group-level effects
#>   vector[N_2] r_2_occ_1;  // actual group-level effects
#>   real lprior = 0;  // prior contributions to the log posterior
#>   // compute penalized spline coefficients
#>   s_occ_1_1 = sds_occ_1[1] * zs_occ_1_1;
#>   r_1_1 = (sd_1[1] * (z_1[1]));
#>   r_2_occ_1 = (sd_2[1] * (z_2[1]));
#>   lprior += student_t_lpdf(Intercept | 3, 0, 2.5);
#>   lprior += student_t_lpdf(sds_occ_1 | 3, 0, 2.5)
#>     - 1 * student_t_lccdf(0 | 3, 0, 2.5);
#>   lprior += student_t_lpdf(sd_1 | 3, 0, 2.5)
#>     - 1 * student_t_lccdf(0 | 3, 0, 2.5);
#>   lprior += student_t_lpdf(sd_2 | 3, 0, 2.5)
#>     - 1 * student_t_lccdf(0 | 3, 0, 2.5);
#> }
#> model {
#>   // likelihood including constants
#>   if (!prior_only) {
#>     // initialize linear predictor term
#>     vector[N] mu = rep_vector(0.0, N);
#>     // initialize linear predictor term
#>     vector[N] occ = rep_vector(0.0, N);
#>     mu += Intercept + Xc * b;
#>     occ += Intercept_occ + Xs_occ * bs_occ + Zs_occ_1_1 * s_occ_1_1;
#>     for (n in 1:N) {
#>       // add more terms to the linear predictor
#>       mu[n] += r_1_1[J_1[n]] * Z_1_1[n];
#>     }
#>     for (n in 1:N) {
#>       // add more terms to the linear predictor
#>       occ[n] += r_2_occ_1[J_2[n]] * Z_2_occ_1[n];
#>     }
#>     target += occupancy_single_lpmf(Y | mu, occ, vint1, vint2, vint3, vint4, vint5, vint6, vint7);
#>   }
#>   // priors including constants
#>   target += lprior;
#>   target += std_normal_lpdf(zs_occ_1_1);
#>   target += std_normal_lpdf(z_1[1]);
#>   target += std_normal_lpdf(z_2[1]);
#> }
#> generated quantities {
#>   // actual population-level intercept
#>   real b_Intercept = Intercept - dot_product(means_X, b);
#>   // actual population-level intercept
#>   real b_occ_Intercept = Intercept_occ;
#> }
  # }
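
# The f_det argument can alternatively receive a brmsformula object that
# supplies formulas for all relevant distributional parameters (here det and
# occ), with the detection formula, beginning with det ~, as its $formula
# element. The sketch below shows one way such an object might be constructed
# with brms::bf(), reusing the covariate and grouping names from the simulated
# data above; it is an illustrative sketch of that interface, not checked
# package output.
if (FALSE) {
  f_joint <- brms::bf(
    det ~ uc1 + ec1 + (1 | species),
    occ ~ uc1 + (1 | species)
  )
  flocker_stancode(
    f_det = f_joint,
    flocker_data = fd
  )
}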
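
# Within-chain threading is available only for single-season rep-constant
# models, i.e. models whose detection formula involves no event-varying
# covariates. The sketch below formats the simulated data without event_covs
# and passes an integer to threads; it illustrates the constraint described
# under threads and is not checked package output.
if (FALSE) {
  fd_rc <- make_flocker_data(sfd$obs, sfd$unit_covs)
  flocker_stancode(
    f_occ = ~ uc1 + (1 | species),
    f_det = ~ uc1 + (1 | species),
    flocker_data = fd_rc,
    threads = 2
  )
}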