From 2e100891930a2e6572386854bb30844f23789e0e Mon Sep 17 00:00:00 2001 From: Max Kuhn Date: Wed, 23 Oct 2024 16:48:49 -0400 Subject: [PATCH] expect_error -> expect_no_error (#953) --- tests/testthat/_snaps/checks.md | 8 +-- tests/testthat/test-autoplot.R | 2 +- tests/testthat/test-bayes.R | 5 +- tests/testthat/test-checks.R | 88 +++++++++++------------- tests/testthat/test-conf-mat-resampled.R | 16 +++-- tests/testthat/test-engine-parameters.R | 25 +++---- tests/testthat/test-extract.R | 31 ++++----- tests/testthat/test-finalization.R | 10 ++- tests/testthat/test-merge.R | 30 ++++---- tests/testthat/test-misc.R | 12 ++-- 10 files changed, 101 insertions(+), 126 deletions(-) diff --git a/tests/testthat/_snaps/checks.md b/tests/testthat/_snaps/checks.md index 32977008d..94524b2d0 100644 --- a/tests/testthat/_snaps/checks.md +++ b/tests/testthat/_snaps/checks.md @@ -291,22 +291,22 @@ # check parameter finalization Code - expect_error(p1 <- tune:::check_parameters(w1, data = mtcars, grid_names = character( - 0)), regex = NA) + expect_no_error(p1 <- tune:::check_parameters(w1, data = mtcars, grid_names = character( + 0))) Message i Creating pre-processing data to finalize unknown parameter: mtry --- Code - expect_error(p2 <- tune:::check_parameters(w2, data = mtcars), regex = NA) + expect_no_error(p2 <- tune:::check_parameters(w2, data = mtcars)) Message i Creating pre-processing data to finalize unknown parameter: mtry --- Code - expect_error(p3_a <- tune:::check_parameters(w3, data = mtcars), regex = NA) + expect_no_error(p3_a <- tune:::check_parameters(w3, data = mtcars)) Message i Creating pre-processing data to finalize unknown parameter: mtry diff --git a/tests/testthat/test-autoplot.R b/tests/testthat/test-autoplot.R index 37c9d009f..d64c56b48 100644 --- a/tests/testthat/test-autoplot.R +++ b/tests/testthat/test-autoplot.R @@ -206,7 +206,7 @@ test_that("coord_obs_pred", { p2 <- p + coord_obs_pred() - expect_error(print(p2), regexp = NA) + expect_no_error(print(p2)) expect_true(inherits(p2$coordinates, "CoordObsPred")) expect_equal(p2$coordinates$limits$x, rng) diff --git a/tests/testthat/test-bayes.R b/tests/testthat/test-bayes.R index d45237e84..59bd5ff3b 100644 --- a/tests/testthat/test-bayes.R +++ b/tests/testthat/test-bayes.R @@ -57,7 +57,7 @@ test_that("tune recipe only", { expect_null(.get_tune_eval_time_target(res)) set.seed(1) - expect_error( + expect_no_error( suppressMessages( tune_bayes( wflow, @@ -67,8 +67,7 @@ test_that("tune recipe only", { iter = iter2, corr = list(type = "matern", nu = 3 / 2) ) - ), - regexp = NA + ) ) diff --git a/tests/testthat/test-checks.R b/tests/testthat/test-checks.R index abd32d3bc..4a88ff2e5 100644 --- a/tests/testthat/test-checks.R +++ b/tests/testthat/test-checks.R @@ -3,7 +3,7 @@ test_that("rsample objects", { obj_loo <- rsample::loo_cv(mtcars) obj_nst <- rsample::nested_cv(mtcars, obj_cv, inside = rsample::bootstraps()) obj_permut <- rsample::permutations(mtcars, hp) - expect_error(tune:::check_rset(obj_cv), regexp = NA) + expect_no_error(tune:::check_rset(obj_cv)) expect_snapshot(error = TRUE, tune:::check_rset(obj_loo)) expect_snapshot(error = TRUE, tune:::check_rset(obj_nst)) expect_snapshot(error = TRUE, tune:::check_rset(obj_permut)) @@ -69,7 +69,7 @@ test_that("grid objects", { add_model(svm_mod) %>% add_recipe(bare_rec) - expect_error(grid_2 <- tune:::check_grid(6, wflow_1), NA) + expect_no_error(grid_2 <- tune:::check_grid(6, wflow_1)) expect_equal(nrow(grid_2), 6) expect_true(inherits(grid_2, "data.frame")) @@ -226,17 +226,15 @@ 
test_that("workflow objects (will not tune, tidymodels/tune#548)", { lr_glmnet_2 <- lr_lm_2 %>% parsnip::set_engine("glmnet") # don't error when supplied tune args make sense given engine / steps - expect_error_na <- function(x) {testthat::expect_error(x, regexp = NA)} + expect_no_error(check_workflow(workflow(rec_bare, lr_lm_0))) + expect_no_error(check_workflow(workflow(rec_bare, lr_glmnet_0))) + expect_no_error(check_workflow(workflow(rec_bare, lr_glmnet_1))) + expect_no_error(check_workflow(workflow(rec_bare, lr_glmnet_2))) - expect_error_na(check_workflow(workflow(rec_bare, lr_lm_0))) - expect_error_na(check_workflow(workflow(rec_bare, lr_glmnet_0))) - expect_error_na(check_workflow(workflow(rec_bare, lr_glmnet_1))) - expect_error_na(check_workflow(workflow(rec_bare, lr_glmnet_2))) - - expect_error_na(check_workflow(workflow(rec_tune, lr_lm_0))) - expect_error_na(check_workflow(workflow(rec_tune, lr_glmnet_0))) - expect_error_na(check_workflow(workflow(rec_tune, lr_glmnet_1))) - expect_error_na(check_workflow(workflow(rec_tune, lr_glmnet_2))) + expect_no_error(check_workflow(workflow(rec_tune, lr_lm_0))) + expect_no_error(check_workflow(workflow(rec_tune, lr_glmnet_0))) + expect_no_error(check_workflow(workflow(rec_tune, lr_glmnet_1))) + expect_no_error(check_workflow(workflow(rec_tune, lr_glmnet_2))) # error when supplied tune args don't make sense given engine / steps expect_error_nt <- function(x) {testthat::expect_error(x, class = "not_tunable_error")} @@ -321,7 +319,7 @@ test_that("metrics must match the parsnip engine", { # ------------------------------------------------------------------------------ test_that("grid control objects", { - expect_error(control_grid(), NA) + expect_no_error(control_grid()) expect_snapshot(error = TRUE, control_grid(tomato = 1)) expect_snapshot(error = TRUE, control_grid(verbose = 1)) expect_snapshot(error = TRUE, control_grid(verbose = rep(TRUE, 2))) @@ -330,18 +328,18 @@ test_that("grid control objects", { expect_snapshot(error = TRUE, control_grid(extract = Inf)) expect_snapshot(error = TRUE, control_grid(pkgs = Inf)) - expect_error(control_grid(verbose = TRUE), NA) - expect_error(control_grid(allow_par = FALSE), NA) - expect_error(control_grid(save_pred = TRUE), NA) - expect_error(control_grid(extract = NULL), NA) - expect_error(control_grid(extract = I), NA) - expect_error(control_grid(pkgs = NULL), NA) - expect_error(control_grid(pkgs = letters), NA) + expect_no_error(control_grid(verbose = TRUE)) + expect_no_error(control_grid(allow_par = FALSE)) + expect_no_error(control_grid(save_pred = TRUE)) + expect_no_error(control_grid(extract = NULL)) + expect_no_error(control_grid(extract = I)) + expect_no_error(control_grid(pkgs = NULL)) + expect_no_error(control_grid(pkgs = letters)) expect_s3_class(control_grid(), c("control_grid", "control_resamples")) }) test_that("Bayes control objects", { - expect_error(control_bayes(), NA) + expect_no_error(control_bayes()) expect_snapshot(error = TRUE, control_bayes(tomato = 1)) expect_snapshot(error = TRUE, control_bayes(verbose = 1)) expect_snapshot(error = TRUE, control_bayes(verbose = rep(TRUE, 2))) @@ -357,15 +355,15 @@ test_that("Bayes control objects", { tmp <- control_bayes(no_improve = 2, uncertain = 5) ) - expect_error(control_bayes(verbose = TRUE), NA) - expect_error(control_bayes(no_improve = 2), NA) - expect_error(control_bayes(uncertain = 2), NA) - expect_error(control_bayes(save_pred = TRUE), NA) - expect_error(control_bayes(extract = NULL), NA) - expect_error(control_bayes(extract = I), NA) 
- expect_error(control_bayes(pkgs = NULL), NA) - expect_error(control_bayes(pkgs = letters), NA) - expect_error(control_bayes(time_limit = 2), NA) + expect_no_error(control_bayes(verbose = TRUE)) + expect_no_error(control_bayes(no_improve = 2)) + expect_no_error(control_bayes(uncertain = 2)) + expect_no_error(control_bayes(save_pred = TRUE)) + expect_no_error(control_bayes(extract = NULL)) + expect_no_error(control_bayes(extract = I)) + expect_no_error(control_bayes(pkgs = NULL)) + expect_no_error(control_bayes(pkgs = letters)) + expect_no_error(control_bayes(time_limit = 2)) expect_s3_class(control_bayes(), "control_bayes") }) @@ -448,16 +446,14 @@ test_that("check parameter finalization", { add_model(rf1) expect_snapshot( - expect_error( - p1 <- tune:::check_parameters(w1, data = mtcars, grid_names = character(0)), - regex = NA + expect_no_error( + p1 <- tune:::check_parameters(w1, data = mtcars, grid_names = character(0)) ) ) expect_false(any(dials::has_unknowns(p1$object))) - expect_error( - p1 <- tune:::check_parameters(w1, data = mtcars, grid_names = "mtry"), - regex = NA + expect_no_error( + p1 <- tune:::check_parameters(w1, data = mtcars, grid_names = "mtry") ) w2 <- @@ -466,9 +462,8 @@ test_that("check parameter finalization", { add_model(rf1) expect_snapshot( - expect_error( - p2 <- tune:::check_parameters(w2, data = mtcars), - regex = NA + expect_no_error( + p2 <- tune:::check_parameters(w2, data = mtcars) ) ) expect_false(any(dials::has_unknowns(p2$object))) @@ -480,9 +475,8 @@ test_that("check parameter finalization", { p3 <- extract_parameter_set_dials(w3) expect_snapshot( - expect_error( - p3_a <- tune:::check_parameters(w3, data = mtcars), - regex = NA + expect_no_error( + p3_a <- tune:::check_parameters(w3, data = mtcars) ) ) expect_false(any(dials::has_unknowns(p3_a$object))) @@ -500,9 +494,8 @@ test_that("check parameter finalization", { extract_parameter_set_dials(w4) %>% update(mtry = dials::mtry(c(1, 10))) - expect_error( - p4_b <- tune:::check_parameters(w4, p4_a, data = mtcars), - regex = NA + expect_no_error( + p4_b <- tune:::check_parameters(w4, p4_a, data = mtcars) ) expect_true(inherits(p4_b, "parameters")) @@ -511,9 +504,8 @@ test_that("check parameter finalization", { add_recipe(rec_tune) %>% add_model(lm1) - expect_error( - p5 <- tune:::check_parameters(w5, data = mtcars), - regex = NA + expect_no_error( + p5 <- tune:::check_parameters(w5, data = mtcars) ) expect_true(inherits(p5, "parameters")) }) diff --git a/tests/testthat/test-conf-mat-resampled.R b/tests/testthat/test-conf-mat-resampled.R index a2c04e2ab..4ae3000f0 100644 --- a/tests/testthat/test-conf-mat-resampled.R +++ b/tests/testthat/test-conf-mat-resampled.R @@ -1,15 +1,19 @@ test_that("appropriate return values", { svm_results <- readRDS(test_path("data", "svm_results.rds")) - expect_error( - cm_1 <- conf_mat_resampled(svm_results, parameters = select_best(svm_results, metric = "accuracy")), - regex = NA + expect_no_error( + cm_1 <- + conf_mat_resampled(svm_results, parameters = select_best(svm_results, metric = "accuracy")) ) expect_true(tibble::is_tibble(cm_1)) - expect_error( - cm_2 <- conf_mat_resampled(svm_results, parameters = select_best(svm_results, metric = "accuracy"), tidy = FALSE), - regex = NA + expect_no_error( + cm_2 <- + conf_mat_resampled( + svm_results, + parameters = select_best(svm_results, metric = "accuracy"), + tidy = FALSE + ) ) expect_equal(class(cm_2), "conf_mat") diff --git a/tests/testthat/test-engine-parameters.R b/tests/testthat/test-engine-parameters.R index 
b0af729cc..956aa1926 100644 --- a/tests/testthat/test-engine-parameters.R +++ b/tests/testthat/test-engine-parameters.R @@ -35,27 +35,23 @@ test_that("tuning with engine parameters with dials objects", { rs <- rsample::vfold_cv(mtcars) set.seed(19828) - expect_error( + expect_no_error( suppressMessages( rf_tune <- rf_mod %>% tune_grid(mpg ~ ., resamples = rs, grid = 3) - ), - regex = NA + ) ) - expect_error( - p <- autoplot(rf_tune), - regex = NA + expect_no_error( + p <- autoplot(rf_tune) ) set.seed(283) - expect_error( + expect_no_error( suppressMessages( rf_search <- rf_mod %>% tune_bayes(mpg ~ ., resamples = rs, initial = 3, iter = 2) - ), - regex = NA + ) ) - expect_error( - p <- autoplot(rf_search), - regex = NA + expect_no_error( + p <- autoplot(rf_search) ) }) @@ -89,11 +85,10 @@ test_that("tuning with engine parameters without dials objects", { ## --------------------------------------------------------------------------- - expect_error( + expect_no_error( suppressMessages( rf_tune <- rf_mod %>% tune_grid(mpg ~ ., resamples = rs, grid = grid) - ), - regex = NA + ) ) expect_snapshot(error = TRUE, { p <- autoplot(rf_tune) diff --git a/tests/testthat/test-extract.R b/tests/testthat/test-extract.R index 2cb513b51..55595a840 100644 --- a/tests/testthat/test-extract.R +++ b/tests/testthat/test-extract.R @@ -10,17 +10,16 @@ test_that("tune recipe only", { extract_recipe(x) %>% tidy(number = 2) } before_kind <- RNGkind()[[1]] - expect_error( + expect_no_error( res_1_1 <- workflow() %>% add_recipe(helper_objects$rec_tune_1) %>% add_model(helper_objects$lm_mod) %>% - tune_grid(resamples = mt_folds, control = control_grid(extract = extr_1_1)), - NA + tune_grid(resamples = mt_folds, control = control_grid(extract = extr_1_1)) ) after_kind <- RNGkind()[[1]] expect_equal(before_kind, after_kind) - expect_error(extract_1_1 <- dplyr::bind_rows(res_1_1$.extracts), NA) + expect_no_error(extract_1_1 <- dplyr::bind_rows(res_1_1$.extracts)) expect_true(all(names(extract_1_1) == c("num_comp", ".extracts", ".config"))) expect_true( @@ -42,7 +41,7 @@ test_that("tune model only", { tibble(index = mod@alphaindex[[1]], estimate = mod@coef[[1]]) } - expect_error( + expect_no_error( res_2_1 <- workflow() %>% add_recipe(helper_objects$rec_no_tune_1) %>% @@ -51,10 +50,9 @@ test_that("tune model only", { resamples = mt_folds, grid = 2, control = control_grid(extract = extr_2_1) - ), - NA + ) ) - expect_error(extract_2_1 <- dplyr::bind_rows(res_2_1$.extracts), NA) + expect_no_error(extract_2_1 <- dplyr::bind_rows(res_2_1$.extracts)) expect_true(all(names(extract_2_1) == c("cost", ".extracts", ".config"))) expect_true( @@ -69,7 +67,7 @@ test_that("tune model only", { } # should not fail: - expect_error( + expect_no_error( res_2_2 <- workflow() %>% add_recipe(helper_objects$rec_tune_1) %>% @@ -78,15 +76,13 @@ test_that("tune model only", { resamples = mt_folds, grid = 2, control = control_grid(extract = extr_2_2) - ), - NA + ) ) - expect_error( + expect_no_error( extract_2_2 <- dplyr::bind_rows(res_2_2$.extracts) %>% - tidyr::unnest(cols = c(.extracts)), - NA + tidyr::unnest(cols = c(.extracts)) ) expect_true(all(!extract_2_2$is_null_rec)) }) @@ -178,16 +174,15 @@ test_that("tune model and recipe", { update(num_comp = dials::num_comp(c(2, 5))) %>% dials::grid_space_filling(size = 4) - expect_error( + expect_no_error( res_3_1 <- tune_grid( wflow_3, resamples = mt_folds, grid = grid_3, control = control_grid(extract = extr_3_1) - ), - NA + ) ) - expect_error(extract_3_1 <- dplyr::bind_rows(res_3_1$.extracts), NA) + 
expect_no_error(extract_3_1 <- dplyr::bind_rows(res_3_1$.extracts)) expect_true(all(names(extract_3_1) == c("num_comp", "cost", ".extracts", ".config"))) expect_true( diff --git a/tests/testthat/test-finalization.R b/tests/testthat/test-finalization.R index b5a65d607..36f0ca5e2 100644 --- a/tests/testthat/test-finalization.R +++ b/tests/testthat/test-finalization.R @@ -23,9 +23,8 @@ test_that("cannot finalize with recipe parameters", { }) set.seed(987323) - expect_error( - suppressMessages(mod_1 %>% tune_grid(rec_2, resamples = rs, grid = 3)), - regex = NA + expect_no_error( + suppressMessages(mod_1 %>% tune_grid(rec_2, resamples = rs, grid = 3)) ) }) @@ -50,9 +49,8 @@ test_that("skip error if grid is supplied", { grid <- tibble::tibble(mtry = 1:3, deg_free = c(3, 3, 4), min_n = c(5, 4, 6)) set.seed(987323) - expect_error( - mod_1 %>% tune_grid(rec_1, resamples = rs, grid = grid), - regex = NA + expect_no_error( + mod_1 %>% tune_grid(rec_1, resamples = rs, grid = grid) ) }) diff --git a/tests/testthat/test-merge.R b/tests/testthat/test-merge.R index 3e8f5b1fd..1f62fdeb1 100644 --- a/tests/testthat/test-merge.R +++ b/tests/testthat/test-merge.R @@ -28,9 +28,8 @@ test_that("recipe merges", { 4L, 0.025, 12L, 1L ) - expect_error( - spline_updated <- merge(spline_rec, spline_grid), - NA + expect_no_error( + spline_updated <- merge(spline_rec, spline_grid) ) check_merged_tibble(spline_updated) for (i in 1:nrow(spline_grid)) { @@ -79,9 +78,8 @@ test_that("partially recipe merge", { 4L, 0.025, 12L, 1L ) - expect_error( - spline_updated <- merge(spline_rec, spline_grid[, -1]), - NA + expect_no_error( + spline_updated <- merge(spline_rec, spline_grid[, -1]) ) check_merged_tibble(spline_updated, complete = FALSE) for (i in 1:nrow(spline_grid)) { @@ -117,9 +115,8 @@ test_that("umerged recipe merge", { recipes::step_spline_b(recipes::all_predictors(), deg_free = tune(), degree = tune()) bst_grid <- tibble::tibble("funky name \n" = 1:4, rules = rep(c(TRUE, FALSE), each = 2)) - expect_error( - spline_updated <- merge(spline_rec, bst_grid), - NA + expect_no_error( + spline_updated <- merge(spline_rec, bst_grid) ) check_merged_tibble(spline_updated, complete = FALSE) for (i in 1:nrow(bst_grid)) { @@ -149,9 +146,8 @@ test_that("model spec merges", { parsnip::set_engine("C5.0", rules = tune(), noGlobalPruning = TRUE) bst_grid <- tibble::tibble("funky name \n" = 1:4, rules = rep(c(TRUE, FALSE), each = 2)) - expect_error( - bst_updated <- merge(bst_model, bst_grid), - NA + expect_no_error( + bst_updated <- merge(bst_model, bst_grid) ) check_merged_tibble(bst_updated, "model_spec") for (i in 1:nrow(bst_grid)) { @@ -190,9 +186,8 @@ test_that("partially model spec merge", { parsnip::set_engine("C5.0", rules = tune(), noGlobalPruning = TRUE) bst_grid <- tibble::tibble("funky name \n" = 1:4, rules = rep(c(TRUE, FALSE), each = 2)) - expect_error( - bst_updated <- merge(bst_model, bst_grid[, -1]), - NA + expect_no_error( + bst_updated <- merge(bst_model, bst_grid[, -1]) ) check_merged_tibble(bst_updated, "model_spec", complete = FALSE) for (i in 1:nrow(bst_grid)) { @@ -214,9 +209,8 @@ test_that("umerged model spec merge", { other_grid <- bst_grid names(bst_grid) <- letters[1:2] - expect_error( - bst_not_updated <- merge(bst_model, other_grid), - NA + expect_no_error( + bst_not_updated <- merge(bst_model, other_grid) ) check_merged_tibble(bst_not_updated, "model_spec", complete = FALSE) # for (i in 1:nrow(other_grid)) { diff --git a/tests/testthat/test-misc.R b/tests/testthat/test-misc.R index 7abfe2f37..6de3e8af0 
100644 --- a/tests/testthat/test-misc.R +++ b/tests/testthat/test-misc.R @@ -49,9 +49,8 @@ test_that("in-line formulas on outcome", { add_formula(log(mpg) ~ .) %>% add_model(parsnip::linear_reg() %>% parsnip::set_engine("lm")) - expect_error( - f1 <- fit_resamples(w1, resamples = rsample::vfold_cv(mtcars)), - regex = NA + expect_no_error( + f1 <- fit_resamples(w1, resamples = rsample::vfold_cv(mtcars)) ) expect_true(inherits(f1, "resample_results")) @@ -60,9 +59,8 @@ test_that("in-line formulas on outcome", { add_recipe(recipes::recipe(mpg ~ ., data = mtcars) %>% recipes::step_log(mpg)) %>% add_model(parsnip::linear_reg() %>% parsnip::set_engine("lm")) - expect_error( - f2 <- fit_resamples(w2, resamples = rsample::vfold_cv(mtcars)), - regex = NA + expect_no_error( + f2 <- fit_resamples(w2, resamples = rsample::vfold_cv(mtcars)) ) expect_true(inherits(f2, "resample_results")) }) @@ -70,7 +68,7 @@ test_that("in-line formulas on outcome", { # ------------------------------------------------------------------------------ test_that("empty ellipses", { - expect_error(tune:::empty_ellipses(), regexp = NA) + expect_no_error(tune:::empty_ellipses()) expect_snapshot(tune:::empty_ellipses(a = 1)) })
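
For reference, the pattern applied throughout this patch: testthat's older idiom
`expect_error(x, regexp = NA)` (meaning "expect no error at all") is replaced by the
dedicated `expect_no_error(x)` expectation. A minimal sketch of the before/after,
assuming a testthat version that provides the `expect_no_*()` family (3.1.5 or later);
`safe_fn()` is a hypothetical helper used only for illustration and is not part of the
tune test suite:

    library(testthat)

    test_that("no error is signaled", {
      # hypothetical helper: any expression that should run cleanly
      safe_fn <- function() log(4)

      # old idiom removed by this patch: regexp = NA asserts the absence of an error
      expect_error(safe_fn(), regexp = NA)

      # replacement used throughout the patch; the expression is evaluated in the
      # test environment, so an in-call assignment keeps the result for later checks
      expect_no_error(res <- safe_fn())
      expect_equal(res, log(4))
    })

As in the hunks above (e.g. `expect_no_error(p1 <- tune:::check_parameters(...))`),
assigning inside the call lets a single expectation both assert clean evaluation and
capture the object for follow-up assertions.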