diff --git a/.github/workflows/R-CMD-check.yaml b/.github/workflows/R-CMD-check.yaml
index 833e9f74..afa36db1 100644
--- a/.github/workflows/R-CMD-check.yaml
+++ b/.github/workflows/R-CMD-check.yaml
@@ -16,13 +16,13 @@ jobs:
   R-CMD-check:
     runs-on: ${{ matrix.config.os }}
 
-    name: ${{ matrix.config.rstan }} RStan - ${{ matrix.config.os }} (${{ matrix.config.r }})
+    name: RStan - ${{ matrix.config.os }} (${{ matrix.config.r }})
 
     strategy:
       fail-fast: false
       matrix:
         config:
-          - { os: macOS-latest, r: 'devel' }
+          - { os: macOS-latest, r: 'devel'}
           - { os: macOS-latest, r: 'release' }
           - { os: macOS-latest, r: 'oldrel' }
           - { os: windows-latest, r: 'devel' }
@@ -41,14 +41,22 @@ jobs:
       - uses: n1hility/cancel-previous-runs@master
         with:
           token: ${{ secrets.GITHUB_TOKEN }}
-        if: "!startsWith(github.ref, 'refs/tags/') && github.ref != 'refs/heads/master'"
 
       - uses: actions/checkout@main
 
-      - uses: r-lib/actions/setup-r@v2-branch
+      - name: Use CRAN RStan/StanHeaders
+        uses: r-lib/actions/setup-r@v2-branch
+        if: matrix.config.r != 'devel'
         with:
           r-version: ${{ matrix.config.r }}
 
+      - name: Use Devel RStan/StanHeaders
+        uses: r-lib/actions/setup-r@v2-branch
+        if: matrix.config.r == 'devel'
+        with:
+          extra-repositories: 'https://stan-dev.r-universe.dev'
+          r-version: ${{ matrix.config.r }}
+
       - uses: r-lib/actions/setup-pandoc@v2-branch
 
       - uses: r-lib/actions/setup-r-dependencies@v2-branch
         with:
diff --git a/tests/testthat/test_stan_clogit.R b/tests/testthat/test_stan_clogit.R
index e89bfc65..00880110 100644
--- a/tests/testthat/test_stan_clogit.R
+++ b/tests/testthat/test_stan_clogit.R
@@ -20,8 +20,8 @@ suppressPackageStartupMessages(library(rstanarm))
 
 SEED <- 123
-ITER <- 500
-CHAINS <- 2
+ITER <- 1000
+CHAINS <- 4
 CORES <- 1
 REFRESH <- 0
 
diff --git a/tests/testthat/test_stan_glm.R b/tests/testthat/test_stan_glm.R
index 533558ec..136ff18a 100644
--- a/tests/testthat/test_stan_glm.R
+++ b/tests/testthat/test_stan_glm.R
@@ -1,16 +1,16 @@
 # Part of the rstanarm package for estimating model parameters
 # Copyright (C) 2015, 2016, 2017 Trustees of Columbia University
-# 
+#
 # This program is free software; you can redistribute it and/or
 # modify it under the terms of the GNU General Public License
 # as published by the Free Software Foundation; either version 3
 # of the License, or (at your option) any later version.
-# 
+#
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
-# 
+#
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
@@ -18,14 +18,14 @@ context("stan_glm") suppressPackageStartupMessages(library(rstanarm)) -SEED <- 12345 +SEED <- 123 set.seed(SEED) CHAINS <- 2 ITER <- 40 # small iter for speed but large enough for psis REFRESH <- 0 SW( - fit_gaus <- stan_glm(mpg ~ wt, data = mtcars, + fit_gaus <- stan_glm(mpg ~ wt, data = mtcars, chains = CHAINS, iter = ITER, seed = SEED, refresh = 0) ) @@ -58,29 +58,29 @@ SW(fit_igaus <- update(fit_gamma, family = inverse.gaussian)) test_that("loo/waic for stan_glm works", { ll_fun <- rstanarm:::ll_fun - + # gaussian expect_equivalent_loo(fit_gaus) expect_identical(ll_fun(fit_gaus), rstanarm:::.ll_gaussian_i) - + # binomial expect_equivalent_loo(fit_binom) expect_equivalent_loo(fit_binom2) expect_identical(ll_fun(fit_binom), rstanarm:::.ll_binomial_i) expect_identical(ll_fun(fit_binom2), rstanarm:::.ll_binomial_i) - + # poisson expect_equivalent_loo(fit_pois) expect_identical(ll_fun(fit_pois), rstanarm:::.ll_poisson_i) - + # negative binomial expect_equivalent_loo(fit_negbin) expect_identical(ll_fun(fit_negbin), rstanarm:::.ll_neg_binomial_2_i) - + # gamma expect_equivalent_loo(fit_gamma) expect_identical(ll_fun(fit_gamma), rstanarm:::.ll_Gamma_i) - + # inverse gaussian expect_equivalent_loo(fit_igaus) expect_identical(ll_fun(fit_igaus), rstanarm:::.ll_inverse.gaussian_i) @@ -92,57 +92,57 @@ test_that("stan_glm throws appropriate errors, warnings, and messages", { treatment <- gl(3,3) dat <- data.frame(counts, outcome, treatment) f <- as.formula(counts ~ outcome + treatment) - + # error: glmer syntax - expect_error(stan_glm(counts ~ treatment + (1|outcome), data = dat), + expect_error(stan_glm(counts ~ treatment + (1|outcome), data = dat), regexp = "model formula not allowed") - + # error: empty model - expect_error(stan_glm(counts ~ 0, data = dat), + expect_error(stan_glm(counts ~ 0, data = dat), regexp = "No intercept or predictors specified") - + # error: stan_glm.nb with family argument - expect_error(stan_glm.nb(f, data = dat, family = "neg_binomial_2"), + expect_error(stan_glm.nb(f, data = dat, family = "neg_binomial_2"), regexp = "'family' should not be specified.") - + # error: prior and prior_intercept not lists - expect_error(stan_glm(f, data = dat, family = "poisson", prior = normal), + expect_error(stan_glm(f, data = dat, family = "poisson", prior = normal), regexp = "should be a named list") - expect_error(stan_glm(f, data = dat, family = "poisson", prior_intercept = normal), + expect_error(stan_glm(f, data = dat, family = "poisson", prior_intercept = normal), regexp = "should be a named list") - + # error: QR only with more than 1 predictor - expect_error(stan_glm(counts ~ 1, data = dat, family = "poisson", QR = TRUE), + expect_error(stan_glm(counts ~ 1, data = dat, family = "poisson", QR = TRUE), regexp = "'QR' can only be specified when there are multiple predictors") - + # error: QR and sparse - expect_error(stan_glm(f, data = dat, family = "poisson", QR = TRUE, sparse = TRUE), + expect_error(stan_glm(f, data = dat, family = "poisson", QR = TRUE, sparse = TRUE), regexp = "'QR' and 'sparse' cannot both be TRUE") - + # require intercept for certain family and link combinations expect_error(stan_glm(counts ~ -1 + outcome + treatment, data = dat, - family = poisson(link="identity"), seed = SEED), + family = poisson(link="identity"), seed = SEED), regexp = "model must have an intercept") expect_error(stan_glm(I(counts > 20) ~ -1 + outcome + treatment, data = dat, - family = binomial(link="log"), seed = SEED), + family = binomial(link="log"), seed = SEED), 
regexp = "model must have an intercept") - + # support of outcome variable - expect_error(stan_glm(cbind(1:10, runif(10)) ~ 1, data = dat, family = "binomial"), + expect_error(stan_glm(cbind(1:10, runif(10)) ~ 1, data = dat, family = "binomial"), "outcome values must be counts") - expect_error(stan_glm(c(1,2,1,2) ~ 1, data = dat, family = "binomial"), + expect_error(stan_glm(c(1,2,1,2) ~ 1, data = dat, family = "binomial"), "outcome values must be 0 or 1") - expect_error(stan_glm((-1):3 ~ 1, data = dat, family = "poisson"), + expect_error(stan_glm((-1):3 ~ 1, data = dat, family = "poisson"), "outcome values must be counts") - expect_error(stan_glm.nb(runif(3) ~ 1, data = dat), + expect_error(stan_glm.nb(runif(3) ~ 1, data = dat), "outcome values must be counts") - expect_error(stan_glm(0:3 ~ 1, data = dat, family = "Gamma"), + expect_error(stan_glm(0:3 ~ 1, data = dat, family = "Gamma"), "outcome values must be positive") - expect_error(stan_glm(runif(3, -2, -1) ~ 1, data = dat, family = "inverse.gaussian"), + expect_error(stan_glm(runif(3, -2, -1) ~ 1, data = dat, family = "inverse.gaussian"), "outcome values must be positive") - expect_error(stan_glm(cbind(1:10, 1:10) ~ 1, data = dat, family = "gaussian"), + expect_error(stan_glm(cbind(1:10, 1:10) ~ 1, data = dat, family = "gaussian"), "should not have multiple columns") - + # prior_aux can't be NULL if prior_PD is TRUE expect_error(stan_glm(mpg ~ wt, data = mtcars, prior_aux = NULL, prior_PD = TRUE), "'prior_aux' cannot be NULL if 'prior_PD' is TRUE") @@ -153,24 +153,24 @@ test_that("gaussian returns expected result for trees example", { links <- c("identity", "log", "inverse") for (i in 1:length(links)) { if (links[i] == "inverse") next # unreliable - fit <- stan_glm(Volume ~ log(Girth) + log(Height), data = trees, + fit <- stan_glm(Volume ~ log(Girth) + log(Height), data = trees, family = gaussian(link = links[i]), algorithm = "optimizing", prior = NULL, prior_intercept = NULL, refresh = 0, QR = TRUE, tol_rel_grad = 1e-16, seed = SEED) expect_stanreg(fit) - - ans <- glm(Volume ~ log(Girth) + log(Height),data = trees, + + ans <- glm(Volume ~ log(Girth) + log(Height),data = trees, family = gaussian(link = links[i])) expect_equal(coef(fit), coef(ans), tol = 0.021) } - - expect_error(update(fit, prior = dnorm), + + expect_error(update(fit, prior = dnorm), regexp = "should be a named list") - expect_error(update(fit, prior_intercept = dnorm), + expect_error(update(fit, prior_intercept = dnorm), regexp = "should be a named list") - expect_error(update(fit, prior = R2(0.5)), + expect_error(update(fit, prior = R2(0.5)), regexp = "should be one of") - expect_error(update(fit, prior_intercept = R2(0.5)), + expect_error(update(fit, prior_intercept = R2(0.5)), regexp = "should be one of") }) @@ -183,7 +183,7 @@ test_that("stan_glm returns expected result for glm poisson example", { prior = NULL, prior_intercept = NULL, QR = TRUE, algorithm = "optimizing", tol_rel_grad = 1e-16, seed = SEED)) expect_stanreg(fit) - + ans <- glm(counts ~ outcome + treatment, data = d.AD, family = poisson(links[i]), start = coef(fit)) if (links[i] == "log") expect_equal(coef(fit), coef(ans), tol = 0.03) @@ -200,13 +200,13 @@ test_that("stan_glm returns expected result for glm poisson example", { test_that("stan_glm returns something for glm negative binomial example", { skip_if_not_installed("MASS") - + for (i in 1:length(links)) { - SW(fit1 <- stan_glm(Days ~ Sex/(Age + Eth*Lrn), data = MASS::quine, - family = neg_binomial_2(links[i]), + SW(fit1 <- stan_glm(Days ~ 
Sex/(Age + Eth*Lrn), data = MASS::quine, + family = neg_binomial_2(links[i]), seed = SEED, chains = 1, iter = 100, QR = TRUE, refresh = 0)) - SW(fit2 <- stan_glm.nb(Days ~ Sex/(Age + Eth*Lrn), data = MASS::quine, + SW(fit2 <- stan_glm.nb(Days ~ Sex/(Age + Eth*Lrn), data = MASS::quine, link = links[i], seed = SEED, chains = 1, iter = 100, QR = TRUE, refresh = 0)) @@ -223,7 +223,7 @@ test_that("stan_glm returns expected result for cars example", { prior = NULL, prior_intercept = NULL, refresh = 0, tol_rel_obj = .Machine$double.eps, algorithm = "optimizing") expect_stanreg(fit) - + ans <- glm(log(dist) ~ log(speed), data = cars, family = gaussian(link = "identity")) expect_equal(coef(fit), coef(ans), tol = 0.1) }) @@ -234,7 +234,7 @@ test_that("stan_glm returns expected result with no intercept for mtcars example tol_rel_obj = .Machine$double.eps, algorithm = "optimizing", seed = SEED, sparse = TRUE) expect_stanreg(fit) - + ans <- glm(f, data = mtcars, family = gaussian(link = "identity")) expect_equal(coef(fit), coef(ans), tol = 0.04) }) @@ -257,7 +257,7 @@ test_that("stan_glm returns expected result for bernoulli", { tol_rel_obj = .Machine$double.eps, algorithm = "optimizing") ) expect_stanreg(fit) - + val <- coef(fit) if (links[i] != "log") { ans <- coef(glm(y ~ x, family = fam, etastart = theta)) @@ -279,19 +279,19 @@ test_that("stan_glm returns expected result for binomial example", { b <- c(0, 0.5, 0.1, -1.0) eta <- X %*% b b[1] <- -max(eta) - 0.05 - } + } else b <- c(0, 0.5, 0.1, -1.0) yes <- rbinom(N, size = trials, prob = fam$linkinv(X %*% b)) y <- cbind(yes, trials - yes) dat <- data.frame(yes, trials, x1 = X[,2], x2 = X[,3], x3 = X[,4]) SW( - fit <- stan_glm(cbind(yes, trials - yes) ~ x1 + x2 + x3, data = dat, + fit <- stan_glm(cbind(yes, trials - yes) ~ x1 + x2 + x3, data = dat, family = fam, seed = SEED, QR = TRUE, prior = NULL, prior_intercept = NULL, refresh = 0, tol_rel_obj = .Machine$double.eps, algorithm = "optimizing") ) expect_stanreg(fit) - + val <- coef(fit) ans <- coef(glm(y ~ x1 + x2 + x3, data = dat, family = fam, start = b)) if (links[i] != "log") expect_equal(val, ans, 0.02, info = links[i]) @@ -300,12 +300,12 @@ test_that("stan_glm returns expected result for binomial example", { prop <- yes / trials dat$prop <- prop SW( - fit2 <- stan_glm(prop ~ x1 + x2 + x3, data = dat, weights = trials, family = fam, + fit2 <- stan_glm(prop ~ x1 + x2 + x3, data = dat, weights = trials, family = fam, seed = SEED, refresh = 0, prior = NULL, prior_intercept = NULL, tol_rel_obj = .Machine$double.eps, algorithm = "optimizing") ) expect_stanreg(fit2) - + val2 <- coef(fit2) if (links[i] != "log") expect_equal(val2, ans, 0.02, info = links[i]) else expect_equal(val2[-1], ans[-1], 0.02, info = links[i]) @@ -313,12 +313,12 @@ test_that("stan_glm returns expected result for binomial example", { }) test_that("model with hs prior doesn't error", { - SW(fit <- stan_glm(mpg ~ ., data = mtcars, prior = hs(4, 2, .5), + SW(fit <- stan_glm(mpg ~ ., data = mtcars, prior = hs(4, 2, .5), seed = SEED, algorithm = "meanfield", QR = TRUE, refresh = 0)) expect_output(print(prior_summary(fit)), "~ hs(df = ", fixed = TRUE) }) -test_that("model with hs_plus prior doesn't error", { +test_that("model with hs_plus prior doesn't error", { # this works except on 32bit Windows skip_on_os("windows") SW(fit <- stan_glm(mpg ~ ., data = mtcars, prior = hs_plus(4, 1, 2, .5), @@ -327,58 +327,58 @@ test_that("model with hs_plus prior doesn't error", { }) test_that("model with laplace prior doesn't error", { - SW(fit <- 
stan_glm(mpg ~ ., data = mtcars, prior = laplace(), + SW(fit <- stan_glm(mpg ~ ., data = mtcars, prior = laplace(), seed = SEED, algorithm = "meanfield", refresh = 0)) - expect_output(print(prior_summary(fit)), + expect_output(print(prior_summary(fit)), "~ laplace(", fixed = TRUE) }) test_that("model with lasso prior doesn't error", { - SW(fit <- stan_glm(mpg ~ ., data = mtcars, prior = lasso(), + SW(fit <- stan_glm(mpg ~ ., data = mtcars, prior = lasso(), seed = SEED, algorithm = "meanfield", refresh = 0)) - expect_output(print(prior_summary(fit)), + expect_output(print(prior_summary(fit)), "~ lasso(", fixed = TRUE) -}) +}) test_that("model with product_normal prior doesn't error", { - SW(fit <- stan_glm(mpg ~ ., data = mtcars, - prior = product_normal(df = 3, scale = 0.5), + SW(fit <- stan_glm(mpg ~ ., data = mtcars, + prior = product_normal(df = 3, scale = 0.5), seed = SEED, algorithm = "meanfield", refresh = 0)) expect_output(print(prior_summary(fit)), "~ product_normal(df = ", fixed = TRUE) }) test_that("prior_aux argument is detected properly", { - SW(fit <- stan_glm(mpg ~ wt, data = mtcars, iter = 10, chains = 1, seed = SEED, - refresh = 0, prior_aux = exponential(5), - prior = normal(autoscale=FALSE), + SW(fit <- stan_glm(mpg ~ wt, data = mtcars, iter = 10, chains = 1, seed = SEED, + refresh = 0, prior_aux = exponential(5), + prior = normal(autoscale=FALSE), prior_intercept = normal(autoscale=FALSE))) expect_identical( - fit$prior.info$prior_aux, - list(dist = "exponential", - location = NULL, scale = NULL, + fit$prior.info$prior_aux, + list(dist = "exponential", + location = NULL, scale = NULL, adjusted_scale = NULL, #1/5 * sd(mtcars$mpg), - df = NULL, rate = 5, + df = NULL, rate = 5, aux_name = "sigma") ) - expect_output(print(prior_summary(fit)), + expect_output(print(prior_summary(fit)), "~ exponential(rate = ", fixed = TRUE) }) test_that("prior_aux can be NULL", { - SW(fit <- stan_glm(mpg ~ wt, data = mtcars, iter = 10, chains = 1, seed = SEED, + SW(fit <- stan_glm(mpg ~ wt, data = mtcars, iter = 10, chains = 1, seed = SEED, refresh = 0, prior_aux = NULL)) - expect_output(print(prior_summary(fit)), + expect_output(print(prior_summary(fit)), "~ flat", fixed = TRUE) }) test_that("autoscale works (insofar as it's reported by prior_summary)", { - SW(fit <- stan_glm(mpg ~ wt, data = mtcars, iter = 5, - prior = normal(autoscale=FALSE), - prior_intercept = normal(autoscale=FALSE), + SW(fit <- stan_glm(mpg ~ wt, data = mtcars, iter = 5, + prior = normal(autoscale=FALSE), + prior_intercept = normal(autoscale=FALSE), prior_aux = cauchy(autoscale=FALSE))) out <- capture.output(print(prior_summary(fit))) expect_false(any(grepl("adjusted", out))) - + SW(fit2 <- update(fit, prior = normal(autoscale=TRUE))) out <- capture.output(print(prior_summary(fit2))) expect_true(any(grepl("Adjusted", out))) @@ -386,7 +386,7 @@ test_that("autoscale works (insofar as it's reported by prior_summary)", { test_that("prior_options is deprecated", { expect_warning( - ops <- prior_options(scaled = FALSE, prior_scale_for_dispersion = 3), + ops <- prior_options(scaled = FALSE, prior_scale_for_dispersion = 3), "deprecated and will be removed" ) expect_warning( @@ -394,8 +394,8 @@ test_that("prior_options is deprecated", { "Setting prior scale for aux to value specified in 'prior_options'" ) expect_output( - print(prior_summary(fit)), - "~ exponential(rate = 0.33)", + print(prior_summary(fit)), + "~ exponential(rate = 0.33)", fixed = TRUE ) }) @@ -406,7 +406,7 @@ test_that("empty interaction levels dropped", { 
   x1[x2 == 1] <- 1
   x1[x2 == 2] <- 1
   y <- rnorm(100)
-  expect_warning(stan_glm(y ~ x1*x2, chains = 1, iter = 20, refresh = 0), 
+  expect_warning(stan_glm(y ~ x1*x2, chains = 1, iter = 20, refresh = 0),
                  regexp = "Dropped empty interaction levels")
 })
 
@@ -414,19 +414,19 @@ test_that("empty interaction levels dropped", {
 test_that("posterior_predict compatible with glms", {
   check_for_pp_errors(fit_gaus)
   expect_linpred_equal(fit_gaus)
-  
+
   mtcars2 <- mtcars
   mtcars2$offs <- runif(nrow(mtcars))
   SW(fit2 <- stan_glm(mpg ~ wt + offset(offs), data = mtcars2,
                       prior_intercept = NULL, prior = NULL, prior_aux = NULL,
                       iter = ITER, chains = CHAINS, seed = SEED, refresh = 0))
-  expect_warning(posterior_predict(fit2, newdata = mtcars2[1:5, ]), 
+  expect_warning(posterior_predict(fit2, newdata = mtcars2[1:5, ]),
                  "offset")
   check_for_pp_errors(fit_gaus, data = mtcars2, offset = mtcars2$offs)
   check_for_pp_errors(fit2, data = mtcars2, offset = mtcars2$offs)
   expect_linpred_equal(fit_gaus)
   # expect_linpred_equal(fit2)
-  
+
   check_for_pp_errors(fit_pois)
   check_for_pp_errors(fit_negbin)
   expect_linpred_equal(fit_pois)
@@ -436,14 +436,14 @@ test_that("posterior_predict compatible with glms", {
   check_for_pp_errors(fit_igaus)
   expect_linpred_equal(fit_gamma)
   expect_linpred_equal(fit_igaus)
-  
+
 })
 
 test_that("contrasts attribute isn't dropped", {
   contrasts <- list(wool = "contr.sum", tension = "contr.sum")
   SW(fit <- stan_glm(breaks ~ wool * tension, data = warpbreaks,
-                     contrasts = contrasts, 
+                     contrasts = contrasts,
                      chains = 1, refresh = 0))
   expect_equal(fit$contrasts, contrasts)
 })
@@ -487,5 +487,5 @@ test_that("returns something with collinear predictors", {
     prior_intercept = normal(location = 0, scale = 0.1),
     chains = CHAINS, iter = ITER, refresh = REFRESH
   )
-  expect_stanreg(fit_1) 
+  expect_stanreg(fit_1)
 })
diff --git a/vignettes/continuous.Rmd b/vignettes/continuous.Rmd
index dffe3160..16384556 100644
--- a/vignettes/continuous.Rmd
+++ b/vignettes/continuous.Rmd
@@ -205,10 +205,10 @@ in the __loo__ package:
 
 ```{r, continuous-kidiq-loo}
 # Compare them with loo
-loo1 <- loo(post1, cores = 2)
-loo2 <- loo(post2, cores = 2)
-loo3 <- loo(post3, cores = 2)
-loo4 <- loo(post4, cores = 2)
+loo1 <- loo(post1, cores = 1)
+loo2 <- loo(post2, cores = 1)
+loo3 <- loo(post3, cores = 1)
+loo4 <- loo(post4, cores = 1)
 (comp <- loo_compare(loo1, loo2, loo3, loo4))
 ```
diff --git a/vignettes/count.Rmd b/vignettes/count.Rmd
index da4e18d6..f427475a 100644
--- a/vignettes/count.Rmd
+++ b/vignettes/count.Rmd
@@ -223,8 +223,8 @@ When we comparing the models using the __loo__ package we also see a clear
 preference for the negative binomial model
 
 ```{r, count-roaches-loo}
-loo1 <- loo(stan_glm1, cores = 2)
-loo2 <- loo(stan_glm2, cores = 2)
+loo1 <- loo(stan_glm1, cores = 1)
+loo2 <- loo(stan_glm2, cores = 1)
 loo_compare(loo1, loo2)
 ```