From 8b1ab4f0ad554fa6edb5144a3a913faf140701aa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=EC=9D=B4=EA=B7=9C=EB=AF=BC?= <32768535+GyuminJack@users.noreply.github.com> Date: Thu, 20 Jul 2023 03:36:59 +0900 Subject: [PATCH 1/8] Fix Python Dockerfile (#5984) --- docker/dockerfile-python | 1 + 1 file changed, 1 insertion(+) diff --git a/docker/dockerfile-python b/docker/dockerfile-python index 541884811a0b..900d05c30012 100644 --- a/docker/dockerfile-python +++ b/docker/dockerfile-python @@ -26,6 +26,7 @@ RUN apt-get update && \ # lightgbm conda install -q -y numpy scipy scikit-learn pandas && \ git clone --recursive --branch stable --depth 1 https://github.com/Microsoft/LightGBM && \ + cd ./LightGBM && \ sh ./build-python.sh install && \ # clean apt-get autoremove -y && apt-get clean && \ From 9a84b61d93dec3c7a0a7e8cd2ee60a35de693de1 Mon Sep 17 00:00:00 2001 From: James Lamb Date: Thu, 20 Jul 2023 15:25:12 -0500 Subject: [PATCH 2/8] [ci] use newer h5py in AppVeyor jobs (fixes #5995) (#5996) --- .ci/test_windows.ps1 | 2 +- R-package/tests/testthat/test_dataset.R | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.ci/test_windows.ps1 b/.ci/test_windows.ps1 index 5962a9441346..413af821e065 100644 --- a/.ci/test_windows.ps1 +++ b/.ci/test_windows.ps1 @@ -124,7 +124,7 @@ if (($env:TASK -eq "regular") -or (($env:APPVEYOR -eq "true") -and ($env:TASK -e cd $env:BUILD_SOURCESDIRECTORY/examples/python-guide @("import matplotlib", "matplotlib.use('Agg')") + (Get-Content "plot_example.py") | Set-Content "plot_example.py" (Get-Content "plot_example.py").replace('graph.render(view=True)', 'graph.render(view=False)') | Set-Content "plot_example.py" # prevent interactive window mode - conda install -q -y -n $env:CONDA_ENV h5py ipywidgets notebook + conda install -q -y -n $env:CONDA_ENV "h5py>3.0" ipywidgets notebook foreach ($file in @(Get-ChildItem *.py)) { @("import sys, warnings", "warnings.showwarning = lambda message, category, filename, lineno, file=None, line=None: sys.stdout.write(warnings.formatwarning(message, category, filename, lineno, line))") + (Get-Content $file) | Set-Content $file python $file ; Check-Output $? 
diff --git a/R-package/tests/testthat/test_dataset.R b/R-package/tests/testthat/test_dataset.R index e7fbe692febe..7ea95655cca2 100644 --- a/R-package/tests/testthat/test_dataset.R +++ b/R-package/tests/testthat/test_dataset.R @@ -446,7 +446,7 @@ test_that("lgb.Dataset: should be able to use and retrieve long feature names", # set one feature to a value longer than the default buffer size used # in LGBM_DatasetGetFeatureNames_R feature_names <- names(iris) - long_name <- paste0(rep("a", 1000L), collapse = "") + long_name <- strrep("a", 1000L) feature_names[1L] <- long_name names(iris) <- feature_names # check that feature name survived the trip from R to C++ and back From 8967debeb5cb03875eb96a22dbcaa5387b5abaa3 Mon Sep 17 00:00:00 2001 From: James Lamb Date: Thu, 20 Jul 2023 23:55:16 -0500 Subject: [PATCH 3/8] [R-package] remove unused internal variables (#5991) --- R-package/R/lgb.Dataset.R | 7 +++---- R-package/R/lgb.Predictor.R | 2 -- R-package/R/lgb.convert_with_rules.R | 4 ---- R-package/R/lgb.cv.R | 2 -- R-package/tests/testthat/test_lgb.Booster.R | 2 +- 5 files changed, 4 insertions(+), 13 deletions(-) diff --git a/R-package/R/lgb.Dataset.R b/R-package/R/lgb.Dataset.R index 4df0acbdf005..e2892ea4bae0 100644 --- a/R-package/R/lgb.Dataset.R +++ b/R-package/R/lgb.Dataset.R @@ -494,11 +494,10 @@ Dataset <- R6::R6Class( if (info_len > 0L) { # Get back fields - ret <- NULL - ret <- if (field_name == "group") { - integer(info_len) + if (field_name == "group") { + ret <- integer(info_len) } else { - numeric(info_len) + ret <- numeric(info_len) } .Call( diff --git a/R-package/R/lgb.Predictor.R b/R-package/R/lgb.Predictor.R index 7f036c9726b6..0b7b39e2d8c2 100644 --- a/R-package/R/lgb.Predictor.R +++ b/R-package/R/lgb.Predictor.R @@ -98,8 +98,6 @@ Predictor <- R6::R6Class( start_iteration <- 0L } - num_row <- 0L - # Check if data is a file name and not a matrix if (identical(class(data), "character") && length(data) == 1L) { diff --git a/R-package/R/lgb.convert_with_rules.R b/R-package/R/lgb.convert_with_rules.R index f282fa3152fc..f024e9dfe6e9 100644 --- a/R-package/R/lgb.convert_with_rules.R +++ b/R-package/R/lgb.convert_with_rules.R @@ -116,10 +116,6 @@ lgb.convert_with_rules <- function(data, rules = NULL) { column_classes <- .get_column_classes(df = data) - is_char <- which(column_classes == "character") - is_factor <- which(column_classes == "factor") - is_logical <- which(column_classes == "logical") - is_data_table <- data.table::is.data.table(x = data) is_data_frame <- is.data.frame(data) diff --git a/R-package/R/lgb.cv.R b/R-package/R/lgb.cv.R index 35c39cd9b300..f81026fe673f 100644 --- a/R-package/R/lgb.cv.R +++ b/R-package/R/lgb.cv.R @@ -225,8 +225,6 @@ lgb.cv <- function(params = list() stop(sQuote("folds"), " must be a list with 2 or more elements that are vectors of indices for each CV-fold") } - nfold <- length(folds) - } else { if (nfold <= 1L) { diff --git a/R-package/tests/testthat/test_lgb.Booster.R b/R-package/tests/testthat/test_lgb.Booster.R index 8708e3237079..1737614b1243 100644 --- a/R-package/tests/testthat/test_lgb.Booster.R +++ b/R-package/tests/testthat/test_lgb.Booster.R @@ -1341,7 +1341,7 @@ test_that("Booster's print, show, and summary work correctly", { .has_expected_content_for_fitted_model(log_txt) # summary() - log_text <- capture.output({ + log_txt <- capture.output({ ret <- summary(model) }) .have_same_handle(ret, model) From 44928d3ae3f3ece7cbf577d59e892333f60508ee Mon Sep 17 00:00:00 2001 From: James Lamb Date: Fri, 21 Jul 2023 10:15:57 -0500 Subject: 
[PATCH 4/8] [R-package] consolidate testing constants in helpers file (#5992) --- R-package/tests/testthat/helper.R | 19 ++ R-package/tests/testthat/test_Predictor.R | 44 ++-- R-package/tests/testthat/test_basic.R | 242 +++++++++--------- .../tests/testthat/test_custom_objective.R | 16 +- R-package/tests/testthat/test_dataset.R | 20 +- .../tests/testthat/test_learning_to_rank.R | 29 +-- R-package/tests/testthat/test_lgb.Booster.R | 101 ++++---- .../tests/testthat/test_lgb.interprete.R | 8 +- .../tests/testthat/test_lgb.plot.importance.R | 6 +- .../testthat/test_lgb.plot.interpretation.R | 8 +- R-package/tests/testthat/test_weighted_loss.R | 12 +- 11 files changed, 232 insertions(+), 273 deletions(-) diff --git a/R-package/tests/testthat/helper.R b/R-package/tests/testthat/helper.R index 16d0d163cc24..9da2f9bd7167 100644 --- a/R-package/tests/testthat/helper.R +++ b/R-package/tests/testthat/helper.R @@ -1,5 +1,6 @@ # ref for this file: # +# * https://r-pkgs.org/testing-design.html#testthat-helper-files # * https://r-pkgs.org/testing-design.html#testthat-setup-files # LightGBM-internal fix to comply with CRAN policy of only using up to 2 threads in tests and examples. # the check farm is a shared resource and will typically be running many checks simultaneously. # .LGB_MAX_THREADS <- 2L + +# by default, how much should results in tests be allowed to differ from hard-coded expected numbers? +.LGB_NUMERIC_TOLERANCE <- 1e-6 + +# are the tests running on Windows? +.LGB_ON_WINDOWS <- .Platform$OS.type == "windows" +.LGB_ON_32_BIT_WINDOWS <- .LGB_ON_WINDOWS && .Machine$sizeof.pointer != 8L + +# are the tests running in a UTF-8 locale? +.LGB_UTF8_LOCALE <- all(endsWith( + Sys.getlocale(category = "LC_CTYPE") + , "UTF-8" +)) + +# control how loud LightGBM's logger is in tests +.LGB_VERBOSITY <- as.integer( + Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1") +) diff --git a/R-package/tests/testthat/test_Predictor.R b/R-package/tests/testthat/test_Predictor.R index 3de2fff297c1..90be1d08cf67 100644 --- a/R-package/tests/testthat/test_Predictor.R +++ b/R-package/tests/testthat/test_Predictor.R @@ -1,11 +1,5 @@ library(Matrix) -VERBOSITY <- as.integer( - Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1") -) - -TOLERANCE <- 1e-6 - test_that("Predictor$finalize() should not fail", { X <- as.matrix(as.integer(iris[, "Species"]), ncol = 1L) y <- iris[["Sepal.Length"]] @@ -16,7 +10,7 @@ test_that("Predictor$finalize() should not fail", { objective = "regression" , num_threads = .LGB_MAX_THREADS ) - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , nrounds = 3L ) model_file <- tempfile(fileext = ".model") @@ -45,7 +39,7 @@ test_that("predictions do not fail for integer input", { objective = "regression" , num_threads = .LGB_MAX_THREADS ) - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , nrounds = 3L ) X_double <- X[c(1L, 51L, 101L), , drop = FALSE] @@ -78,7 +72,7 @@ test_that("start_iteration works correctly", { num_leaves = 4L , learning_rate = 0.6 , objective = "binary" - , verbosity = VERBOSITY + , verbosity = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , nrounds = 50L @@ -128,7 +122,7 @@ test_that("Feature contributions from sparse inputs produce sparse outputs", { data = dtrain , obj = "regression" , nrounds = 5L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , params = list(min_data_in_leaf = 5L, num_threads = .LGB_MAX_THREADS) ) @@ -159,7 +153,7 @@ test_that("Sparse feature contribution predictions do not take inputs with wrong data = dtrain , obj = "regression" , nrounds
= 5L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , params = list(min_data_in_leaf = 5L, num_threads = .LGB_MAX_THREADS) ) @@ -189,7 +183,7 @@ test_that("Feature contribution predictions do not take non-general CSR or CSC i data = dtrain , obj = "regression" , nrounds = 5L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , params = list(min_data_in_leaf = 5L, num_threads = .LGB_MAX_THREADS) ) @@ -217,14 +211,14 @@ test_that("predict() params should override keyword argument for raw-score predi , num_threads = .LGB_MAX_THREADS ) , nrounds = 10L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY ) # check that the predictions from predict.lgb.Booster() really look like raw score predictions preds_prob <- predict(bst, X) preds_raw_s3_keyword <- predict(bst, X, type = "raw") preds_prob_from_raw <- 1.0 / (1.0 + exp(-preds_raw_s3_keyword)) - expect_equal(preds_prob, preds_prob_from_raw, tolerance = TOLERANCE) + expect_equal(preds_prob, preds_prob_from_raw, tolerance = .LGB_NUMERIC_TOLERANCE) accuracy <- sum(as.integer(preds_prob_from_raw > 0.5) == y) / length(y) expect_equal(accuracy, 1.0) @@ -269,7 +263,7 @@ test_that("predict() params should override keyword argument for leaf-index pred , num_threads = .LGB_MAX_THREADS ) , nrounds = 10L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY ) # check that predictions really look like leaf index predictions @@ -323,7 +317,7 @@ test_that("predict() params should override keyword argument for feature contrib , num_threads = .LGB_MAX_THREADS ) , nrounds = 10L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY ) # check that predictions really look like feature contributions @@ -431,7 +425,7 @@ test_that("predict() keeps row names from data (regression)", { data = dtrain , obj = "regression" , nrounds = 5L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , params = list(min_data_in_leaf = 1L, num_threads = .LGB_MAX_THREADS) ) .check_all_row_name_expectations(bst, X) @@ -447,7 +441,7 @@ test_that("predict() keeps row names from data (binary classification)", { data = dtrain , obj = "binary" , nrounds = 5L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , params = list(num_threads = .LGB_MAX_THREADS) ) .check_all_row_name_expectations(bst, X) @@ -464,7 +458,7 @@ test_that("predict() keeps row names from data (multi-class classification)", { , obj = "multiclass" , params = list(num_class = 3L, num_threads = .LGB_MAX_THREADS) , nrounds = 5L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY ) .check_all_row_name_expectations(bst, X) }) @@ -485,7 +479,7 @@ test_that("predictions for regression and binary classification are returned as data = dtrain , obj = "regression" , nrounds = 5L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , params = list(min_data_in_leaf = 1L, num_threads = .LGB_MAX_THREADS) ) pred <- predict(model, X) @@ -503,7 +497,7 @@ test_that("predictions for regression and binary classification are returned as data = dtrain , obj = "binary" , nrounds = 5L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , params = list(num_threads = .LGB_MAX_THREADS) ) pred <- predict(model, X) @@ -523,7 +517,7 @@ test_that("predictions for multiclass classification are returned as matrix", { data = dtrain , obj = "multiclass" , nrounds = 5L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , params = list(num_class = 3L, num_threads = .LGB_MAX_THREADS) ) pred <- predict(model, X) @@ -668,7 +662,7 @@ test_that("predict type='class' returns predicted class for classification objec data = dtrain , obj = "binary" , 
nrounds = 5L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , params = list(num_threads = .LGB_MAX_THREADS) ) pred <- predict(bst, X, type = "class") @@ -682,7 +676,7 @@ test_that("predict type='class' returns predicted class for classification objec data = dtrain , obj = "multiclass" , nrounds = 5L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , params = list(num_class = 3L, num_threads = .LGB_MAX_THREADS) ) pred <- predict(model, X, type = "class") @@ -698,7 +692,7 @@ test_that("predict type='class' returns values in the target's range for regress data = dtrain , obj = "regression" , nrounds = 5L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , params = list(num_threads = .LGB_MAX_THREADS) ) pred <- predict(bst, X, type = "class") diff --git a/R-package/tests/testthat/test_basic.R b/R-package/tests/testthat/test_basic.R index 651dd017c164..b0253b1e488e 100644 --- a/R-package/tests/testthat/test_basic.R +++ b/R-package/tests/testthat/test_basic.R @@ -1,20 +1,8 @@ -VERBOSITY <- as.integer( - Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1") -) - -ON_WINDOWS <- .Platform$OS.type == "windows" - -UTF8_LOCALE <- all(endsWith( - Sys.getlocale(category = "LC_CTYPE") - , "UTF-8" -)) - data(agaricus.train, package = "lightgbm") data(agaricus.test, package = "lightgbm") train <- agaricus.train test <- agaricus.test -TOLERANCE <- 1e-6 set.seed(708L) # [description] Every time this function is called, it adds 0.1 @@ -82,7 +70,7 @@ test_that("train and predict binary classification", { num_leaves = 5L , objective = "binary" , metric = "binary_error" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , nrounds = nrounds @@ -104,7 +92,7 @@ test_that("train and predict binary classification", { expect_equal(length(pred1), 6513L) err_pred1 <- sum((pred1 > 0.5) != train$label) / length(train$label) err_log <- record_results[1L] - expect_lt(abs(err_pred1 - err_log), TOLERANCE) + expect_lt(abs(err_pred1 - err_log), .LGB_NUMERIC_TOLERANCE) }) @@ -124,7 +112,7 @@ test_that("train and predict softmax", { , objective = "multiclass" , metric = "multi_error" , num_class = 3L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , nrounds = 20L @@ -155,7 +143,7 @@ test_that("use of multiple eval metrics works", { , learning_rate = 1.0 , objective = "binary" , metric = metrics - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , nrounds = 10L @@ -186,13 +174,13 @@ test_that("lgb.Booster.upper_bound() and lgb.Booster.lower_bound() work as expec num_leaves = 5L , objective = "binary" , metric = "binary_error" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , nrounds = nrounds ) - expect_true(abs(bst$lower_bound() - -1.590853) < TOLERANCE) - expect_true(abs(bst$upper_bound() - 1.871015) < TOLERANCE) + expect_true(abs(bst$lower_bound() - -1.590853) < .LGB_NUMERIC_TOLERANCE) + expect_true(abs(bst$upper_bound() - 1.871015) < .LGB_NUMERIC_TOLERANCE) }) test_that("lgb.Booster.upper_bound() and lgb.Booster.lower_bound() work as expected for regression", { @@ -205,13 +193,13 @@ test_that("lgb.Booster.upper_bound() and lgb.Booster.lower_bound() work as expec num_leaves = 5L , objective = "regression" , metric = "l2" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , nrounds = nrounds ) - expect_true(abs(bst$lower_bound() - 0.1513859) < TOLERANCE) - expect_true(abs(bst$upper_bound() - 0.9080349) < TOLERANCE) + 
expect_true(abs(bst$lower_bound() - 0.1513859) < .LGB_NUMERIC_TOLERANCE) + expect_true(abs(bst$upper_bound() - 0.9080349) < .LGB_NUMERIC_TOLERANCE) }) test_that("lightgbm() rejects negative or 0 value passed to nrounds", { @@ -240,7 +228,7 @@ test_that("lightgbm() accepts nrounds as either a top-level argument or paramete objective = "regression" , metric = "l2" , num_leaves = 5L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) ) @@ -254,7 +242,7 @@ test_that("lightgbm() accepts nrounds as either a top-level argument or paramete , metric = "l2" , num_leaves = 5L , nrounds = nrounds - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) ) @@ -269,7 +257,7 @@ test_that("lightgbm() accepts nrounds as either a top-level argument or paramete , metric = "l2" , num_leaves = 5L , nrounds = nrounds - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) ) @@ -316,7 +304,7 @@ test_that("lightgbm() performs evaluation on validation sets if they are provide "binary_error" , "auc" ) - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , nrounds = nrounds @@ -341,9 +329,9 @@ test_that("lightgbm() performs evaluation on validation sets if they are provide eval_results <- bst$record_evals[[valid_name]][["binary_error"]] expect_length(eval_results[["eval"]], nrounds) } - expect_true(abs(bst$record_evals[["train"]][["binary_error"]][["eval"]][[1L]] - 0.02226317) < TOLERANCE) - expect_true(abs(bst$record_evals[["valid1"]][["binary_error"]][["eval"]][[1L]] - 0.02226317) < TOLERANCE) - expect_true(abs(bst$record_evals[["valid2"]][["binary_error"]][["eval"]][[1L]] - 0.02226317) < TOLERANCE) + expect_true(abs(bst$record_evals[["train"]][["binary_error"]][["eval"]][[1L]] - 0.02226317) < .LGB_NUMERIC_TOLERANCE) + expect_true(abs(bst$record_evals[["valid1"]][["binary_error"]][["eval"]][[1L]] - 0.02226317) < .LGB_NUMERIC_TOLERANCE) + expect_true(abs(bst$record_evals[["valid2"]][["binary_error"]][["eval"]][[1L]] - 0.02226317) < .LGB_NUMERIC_TOLERANCE) }) test_that("training continuation works", { @@ -359,7 +347,7 @@ test_that("training continuation works", { , metric = "binary_logloss" , num_leaves = 5L , learning_rate = 1.0 - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) @@ -386,7 +374,7 @@ test_that("cv works", { , metric = "l2,l1" , min_data = 1L , learning_rate = 1.0 - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) bst <- lgb.cv( @@ -407,7 +395,7 @@ test_that("CVBooster$reset_parameter() works as expected", { objective = "regression" , min_data = 1L , num_leaves = 7L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , data = dtrain @@ -487,7 +475,7 @@ test_that("lightgbm.cv() gives the correct best_score and best_iter for a metric , metric = "auc,binary_error" , learning_rate = 1.5 , num_leaves = 5L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) ) @@ -551,7 +539,7 @@ test_that("lgb.cv() respects showsd argument", { objective = "regression" , metric = "l2" , min_data = 1L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) nrounds <- 5L @@ -594,7 +582,7 @@ test_that("lgb.cv() raises an informative error for unrecognized objectives", { data = dtrain , params = list( objective_type = "not_a_real_objective" - , verbosity = VERBOSITY + , verbosity = .LGB_VERBOSITY , num_threads = 
.LGB_MAX_THREADS ) ) @@ -616,7 +604,7 @@ test_that("lgb.cv() respects parameter aliases for objective", { num_leaves = 5L , application = "binary" , num_iterations = nrounds - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , nfold = nfold @@ -637,7 +625,7 @@ test_that("lgb.cv() prefers objective in params to keyword argument", { ) , params = list( application = "regression_l1" - , verbosity = VERBOSITY + , verbosity = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , nrounds = 5L @@ -673,7 +661,7 @@ test_that("lgb.cv() respects parameter aliases for metric", { , objective = "binary" , num_iterations = nrounds , metric_types = c("auc", "binary_logloss") - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , nfold = nfold @@ -691,7 +679,7 @@ test_that("lgb.cv() respects eval_train_metric argument", { objective = "regression" , metric = "l2" , min_data = 1L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) nrounds <- 5L @@ -739,7 +727,7 @@ test_that("lgb.train() works as expected with multiple eval metrics", { objective = "binary" , metric = metrics , learning_rate = 1.0 - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , valids = list( @@ -770,7 +758,7 @@ test_that("lgb.train() raises an informative error for unrecognized objectives", data = dtrain , params = list( objective_type = "not_a_real_objective" - , verbosity = VERBOSITY + , verbosity = .LGB_VERBOSITY ) ) }, type = "message") @@ -790,7 +778,7 @@ test_that("lgb.train() respects parameter aliases for objective", { num_leaves = 5L , application = "binary" , num_iterations = nrounds - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , valids = list( @@ -812,7 +800,7 @@ test_that("lgb.train() prefers objective in params to keyword argument", { ) , params = list( loss = "regression_l1" - , verbosity = VERBOSITY + , verbosity = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , nrounds = 5L @@ -844,7 +832,7 @@ test_that("lgb.train() respects parameter aliases for metric", { , objective = "binary" , num_iterations = nrounds , metric_types = c("auc", "binary_logloss") - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , valids = list( @@ -863,7 +851,7 @@ test_that("lgb.train() rejects negative or 0 value passed to nrounds", { params <- list( objective = "regression" , metric = "l2,l1" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) for (nround_value in c(-10L, 0L)) { @@ -893,7 +881,7 @@ test_that("lgb.train() accepts nrounds as either a top-level argument or paramet objective = "regression" , metric = "l2" , num_leaves = 5L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) ) @@ -910,7 +898,7 @@ test_that("lgb.train() accepts nrounds as either a top-level argument or paramet , metric = "l2" , num_leaves = 5L , nrounds = nrounds - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY ) ) @@ -927,7 +915,7 @@ test_that("lgb.train() accepts nrounds as either a top-level argument or paramet , metric = "l2" , num_leaves = 5L , nrounds = nrounds - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) ) @@ -967,7 +955,7 @@ test_that("lgb.train() throws an informative error if 'data' is not an lgb.Datas params = list( objective = "regression" , metric = "l2,l1" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY ) , data = 
val , 10L @@ -986,7 +974,7 @@ test_that("lgb.train() throws an informative error if 'valids' is not a list of params = list( objective = "regression" , metric = "l2,l1" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY ) , data = lgb.Dataset(train$data, label = train$label) , 10L @@ -1005,7 +993,7 @@ test_that("lgb.train() errors if 'valids' is a list of lgb.Dataset objects but s params = list( objective = "regression" , metric = "l2,l1" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY ) , data = lgb.Dataset(train$data, label = train$label) , 10L @@ -1024,7 +1012,7 @@ test_that("lgb.train() throws an informative error if 'valids' contains lgb.Data params = list( objective = "regression" , metric = "l2,l1" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY ) , data = lgb.Dataset(train$data, label = train$label) , 10L @@ -1045,7 +1033,7 @@ test_that("lgb.train() works with force_col_wise and force_row_wise", { objective = "binary" , metric = "binary_error" , force_col_wise = TRUE - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) bst_col_wise <- lgb.train( @@ -1058,7 +1046,7 @@ test_that("lgb.train() works with force_col_wise and force_row_wise", { objective = "binary" , metric = "binary_error" , force_row_wise = TRUE - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) bst_row_wise <- lgb.train( @@ -1099,7 +1087,7 @@ test_that("lgb.train() works as expected with sparse features", { objective = "binary" , min_data = 1L , min_data_in_bin = 1L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , data = dtrain @@ -1112,7 +1100,7 @@ test_that("lgb.train() works as expected with sparse features", { expect_equal(parsed_model$objective, "binary sigmoid:1") expect_false(parsed_model$average_output) expected_error <- 0.6931268 - expect_true(abs(bst$eval_train()[[1L]][["value"]] - expected_error) < TOLERANCE) + expect_true(abs(bst$eval_train()[[1L]][["value"]] - expected_error) < .LGB_NUMERIC_TOLERANCE) }) test_that("lgb.train() works with early stopping for classification", { @@ -1143,7 +1131,7 @@ test_that("lgb.train() works with early stopping for classification", { params = list( objective = "binary" , metric = "binary_error" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , data = dtrain @@ -1168,7 +1156,7 @@ test_that("lgb.train() works with early stopping for classification", { objective = "binary" , metric = "binary_error" , early_stopping_rounds = early_stopping_rounds - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , data = dtrain @@ -1220,7 +1208,7 @@ test_that("lgb.train() treats early_stopping_rounds<=0 as disabling early stoppi params = list( objective = "binary" , metric = "binary_error" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , data = dtrain @@ -1245,7 +1233,7 @@ test_that("lgb.train() treats early_stopping_rounds<=0 as disabling early stoppi objective = "binary" , metric = "binary_error" , n_iter_no_change = value - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , data = dtrain @@ -1288,7 +1276,7 @@ test_that("lgb.train() works with early stopping for classification with a metri , metric = "auc" , max_depth = 3L , early_stopping_rounds = early_stopping_rounds - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , data = dtrain @@ -1303,7 +1291,7 @@ test_that("lgb.train() 
works with early stopping for classification with a metri , metric = "binary_error" , max_depth = 3L , early_stopping_rounds = early_stopping_rounds - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , data = dtrain @@ -1322,7 +1310,7 @@ test_that("lgb.train() works with early stopping for classification with a metri ) expect_identical(bst_binary_error$best_iter, 1L) expect_identical(bst_binary_error$current_iter(), early_stopping_rounds + 1L) - expect_true(abs(bst_binary_error$best_score - 0.01613904) < TOLERANCE) + expect_true(abs(bst_binary_error$best_score - 0.01613904) < .LGB_NUMERIC_TOLERANCE) # early stopping should not have been hit for AUC (higher_better = TRUE) eval_info <- bst_auc$.__enclos_env__$private$get_eval_info() @@ -1333,7 +1321,7 @@ test_that("lgb.train() works with early stopping for classification with a metri ) expect_identical(bst_auc$best_iter, 9L) expect_identical(bst_auc$current_iter(), nrounds) - expect_true(abs(bst_auc$best_score - 0.9999969) < TOLERANCE) + expect_true(abs(bst_auc$best_score - 0.9999969) < .LGB_NUMERIC_TOLERANCE) }) test_that("lgb.train() works with early stopping for regression", { @@ -1365,7 +1353,7 @@ test_that("lgb.train() works with early stopping for regression", { params = list( objective = "regression" , metric = "rmse" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , data = dtrain @@ -1390,7 +1378,7 @@ test_that("lgb.train() works with early stopping for regression", { objective = "regression" , metric = "rmse" , early_stopping_rounds = early_stopping_rounds - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , data = dtrain @@ -1426,7 +1414,7 @@ test_that("lgb.train() does not stop early if early_stopping_rounds is not given params = list( objective = "regression" , metric = "None" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , data = DTRAIN_RANDOM_REGRESSION @@ -1471,7 +1459,7 @@ test_that("If first_metric_only is not given or is FALSE, lgb.train() decides to objective = "regression" , metric = "None" , early_stopping_rounds = early_stopping_rounds - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , list( @@ -1479,7 +1467,7 @@ test_that("If first_metric_only is not given or is FALSE, lgb.train() decides to , metric = "None" , early_stopping_rounds = early_stopping_rounds , first_metric_only = FALSE - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) ) @@ -1543,7 +1531,7 @@ test_that("If first_metric_only is TRUE, lgb.train() decides to stop early based , metric = "None" , early_stopping_rounds = early_stopping_rounds , first_metric_only = TRUE - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , data = DTRAIN_RANDOM_REGRESSION @@ -1590,7 +1578,7 @@ test_that("lgb.train() works when a mixture of functions and strings are passed params = list( objective = "regression" , metric = "None" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , data = DTRAIN_RANDOM_REGRESSION @@ -1616,15 +1604,15 @@ test_that("lgb.train() works when a mixture of functions and strings are passed # the difference metrics shouldn't have been mixed up with each other results <- bst$record_evals[["valid1"]] - expect_true(abs(results[["rmse"]][["eval"]][[1L]] - 1.105012) < TOLERANCE) - expect_true(abs(results[["l2"]][["eval"]][[1L]] - 1.221051) < TOLERANCE) + 
expect_true(abs(results[["rmse"]][["eval"]][[1L]] - 1.105012) < .LGB_NUMERIC_TOLERANCE) + expect_true(abs(results[["l2"]][["eval"]][[1L]] - 1.221051) < .LGB_NUMERIC_TOLERANCE) expected_increasing_metric <- increasing_metric_starting_value + 0.1 expect_true( abs( results[["increasing_metric"]][["eval"]][[1L]] - expected_increasing_metric - ) < TOLERANCE + ) < .LGB_NUMERIC_TOLERANCE ) - expect_true(abs(results[["constant_metric"]][["eval"]][[1L]] - CONSTANT_METRIC_VALUE) < TOLERANCE) + expect_true(abs(results[["constant_metric"]][["eval"]][[1L]] - CONSTANT_METRIC_VALUE) < .LGB_NUMERIC_TOLERANCE) }) @@ -1647,7 +1635,7 @@ test_that("lgb.train() works when a list of strings or a character vector is pas params = list( objective = "binary" , metric = "None" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , data = DTRAIN_RANDOM_CLASSIFICATION @@ -1669,10 +1657,10 @@ test_that("lgb.train() works when a list of strings or a character vector is pas # the difference metrics shouldn't have been mixed up with each other results <- bst$record_evals[["valid1"]] if ("binary_error" %in% unlist(eval_variation)) { - expect_true(abs(results[["binary_error"]][["eval"]][[1L]] - 0.4864865) < TOLERANCE) + expect_true(abs(results[["binary_error"]][["eval"]][[1L]] - 0.4864865) < .LGB_NUMERIC_TOLERANCE) } if ("binary_logloss" %in% unlist(eval_variation)) { - expect_true(abs(results[["binary_logloss"]][["eval"]][[1L]] - 0.6932548) < TOLERANCE) + expect_true(abs(results[["binary_logloss"]][["eval"]][[1L]] - 0.6932548) < .LGB_NUMERIC_TOLERANCE) } } }) @@ -1685,7 +1673,7 @@ test_that("lgb.train() works when you specify both 'metric' and 'eval' with stri params = list( objective = "binary" , metric = "binary_error" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , data = DTRAIN_RANDOM_CLASSIFICATION @@ -1706,8 +1694,8 @@ test_that("lgb.train() works when you specify both 'metric' and 'eval' with stri # the difference metrics shouldn't have been mixed up with each other results <- bst$record_evals[["valid1"]] - expect_true(abs(results[["binary_error"]][["eval"]][[1L]] - 0.4864865) < TOLERANCE) - expect_true(abs(results[["binary_logloss"]][["eval"]][[1L]] - 0.6932548) < TOLERANCE) + expect_true(abs(results[["binary_error"]][["eval"]][[1L]] - 0.4864865) < .LGB_NUMERIC_TOLERANCE) + expect_true(abs(results[["binary_logloss"]][["eval"]][[1L]] - 0.6932548) < .LGB_NUMERIC_TOLERANCE) }) test_that("lgb.train() works when you give a function for eval", { @@ -1718,7 +1706,7 @@ test_that("lgb.train() works when you give a function for eval", { params = list( objective = "binary" , metric = "None" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , data = DTRAIN_RANDOM_CLASSIFICATION @@ -1731,7 +1719,7 @@ test_that("lgb.train() works when you give a function for eval", { # the difference metrics shouldn't have been mixed up with each other results <- bst$record_evals[["valid1"]] - expect_true(abs(results[["constant_metric"]][["eval"]][[1L]] - CONSTANT_METRIC_VALUE) < TOLERANCE) + expect_true(abs(results[["constant_metric"]][["eval"]][[1L]] - CONSTANT_METRIC_VALUE) < .LGB_NUMERIC_TOLERANCE) }) test_that("lgb.train() works with early stopping for regression with a metric that should be minimized", { @@ -1770,7 +1758,7 @@ test_that("lgb.train() works with early stopping for regression with a metric th ) , min_data_in_bin = 5L , early_stopping_rounds = early_stopping_rounds - , verbose = VERBOSITY + , verbose = 
.LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , data = dtrain @@ -1823,7 +1811,7 @@ test_that("lgb.train() supports non-ASCII feature names", { , obj = "regression" , params = list( metric = "rmse" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , colnames = feature_names @@ -1834,7 +1822,7 @@ test_that("lgb.train() supports non-ASCII feature names", { # UTF-8 strings are not well-supported on Windows # * https://developer.r-project.org/Blog/public/2020/05/02/utf-8-support-on-windows/ # * https://developer.r-project.org/Blog/public/2020/07/30/windows/utf-8-build-of-r-and-cran-packages/index.html - if (UTF8_LOCALE && !ON_WINDOWS) { + if (.LGB_UTF8_LOCALE && !.LGB_ON_WINDOWS) { expect_identical( dumped_model[["feature_names"]] , feature_names @@ -1864,7 +1852,7 @@ test_that("lgb.train() works with integer, double, and numeric data", { , min_data_in_leaf = 1L , learning_rate = 0.01 , seed = 708L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY ) , nrounds = nrounds ) @@ -1877,7 +1865,7 @@ test_that("lgb.train() works with integer, double, and numeric data", { # should have achieved expected performance preds <- predict(bst, X) mae <- mean(abs(y - preds)) - expect_true(abs(mae - expected_mae) < TOLERANCE) + expect_true(abs(mae - expected_mae) < .LGB_NUMERIC_TOLERANCE) } }) @@ -1969,7 +1957,7 @@ test_that("when early stopping is not activated, best_iter and best_score come f , metric = "rmse" , learning_rate = 1.5 , num_leaves = 5L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) @@ -2132,7 +2120,7 @@ test_that("lightgbm.train() gives the correct best_score and best_iter for a met , metric = "auc" , learning_rate = 1.5 , num_leaves = 5L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) ) @@ -2218,7 +2206,7 @@ test_that("lgb.cv() works when you specify both 'metric' and 'eval' with strings params = list( objective = "binary" , metric = "binary_error" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , data = DTRAIN_RANDOM_CLASSIFICATION @@ -2237,8 +2225,8 @@ test_that("lgb.cv() works when you specify both 'metric' and 'eval' with strings # the difference metrics shouldn't have been mixed up with each other results <- bst$record_evals[["valid"]] - expect_true(abs(results[["binary_error"]][["eval"]][[1L]] - 0.5005654) < TOLERANCE) - expect_true(abs(results[["binary_logloss"]][["eval"]][[1L]] - 0.7011232) < TOLERANCE) + expect_true(abs(results[["binary_error"]][["eval"]][[1L]] - 0.5005654) < .LGB_NUMERIC_TOLERANCE) + expect_true(abs(results[["binary_logloss"]][["eval"]][[1L]] - 0.7011232) < .LGB_NUMERIC_TOLERANCE) # all boosters should have been created expect_length(bst$boosters, nfolds) @@ -2253,7 +2241,7 @@ test_that("lgb.cv() works when you give a function for eval", { params = list( objective = "binary" , metric = "None" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , data = DTRAIN_RANDOM_CLASSIFICATION @@ -2264,7 +2252,7 @@ test_that("lgb.cv() works when you give a function for eval", { # the difference metrics shouldn't have been mixed up with each other results <- bst$record_evals[["valid"]] - expect_true(abs(results[["constant_metric"]][["eval"]][[1L]] - CONSTANT_METRIC_VALUE) < TOLERANCE) + expect_true(abs(results[["constant_metric"]][["eval"]][[1L]] - CONSTANT_METRIC_VALUE) < .LGB_NUMERIC_TOLERANCE) expect_named(results, "constant_metric") }) @@ -2280,7 +2268,7 @@ test_that("If 
first_metric_only is TRUE, lgb.cv() decides to stop early based on , metric = "None" , early_stopping_rounds = early_stopping_rounds , first_metric_only = TRUE - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , data = DTRAIN_RANDOM_REGRESSION @@ -2338,7 +2326,7 @@ test_that("early stopping works with lgb.cv()", { , metric = "None" , early_stopping_rounds = early_stopping_rounds , first_metric_only = TRUE - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , data = DTRAIN_RANDOM_REGRESSION @@ -2520,7 +2508,7 @@ test_that("lgb.train() fit on linearly-related data improves when using linear params <- list( objective = "regression" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , metric = "mse" , seed = 0L , num_leaves = 2L @@ -2555,7 +2543,7 @@ test_that("lgb.train() with linear learner fails already-constructed dataset wit set.seed(708L) params <- list( objective = "regression" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , metric = "mse" , seed = 0L , num_leaves = 2L @@ -2597,7 +2585,7 @@ test_that("lgb.train() works with linear learners even if Dataset has missing va params <- list( objective = "regression" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , metric = "mse" , seed = 0L , num_leaves = 2L @@ -2645,7 +2633,7 @@ test_that("lgb.train() works with linear learners, bagging, and a Dataset that h params <- list( objective = "regression" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , metric = "mse" , seed = 0L , num_leaves = 2L @@ -2806,7 +2794,7 @@ test_that(paste0("lgb.train() gives same result when interaction_constraints is params <- list( objective = "regression" , interaction_constraints = list(c(1L, 2L), 3L) - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) bst <- lightgbm( @@ -2820,7 +2808,7 @@ test_that(paste0("lgb.train() gives same result when interaction_constraints is params <- list( objective = "regression" , interaction_constraints = list(c(cnames[[1L]], cnames[[2L]]), cnames[[3L]]) - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) bst <- lightgbm( @@ -2833,7 +2821,7 @@ test_that(paste0("lgb.train() gives same result when interaction_constraints is params <- list( objective = "regression" , interaction_constraints = list(c(cnames[[1L]], cnames[[2L]]), 3L) - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) bst <- lightgbm( @@ -2855,7 +2843,7 @@ test_that(paste0("lgb.train() gives same results when using interaction_constrai params <- list( objective = "regression" , interaction_constraints = list(c(1L, 2L), 3L) - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) bst <- lightgbm( @@ -2869,7 +2857,7 @@ test_that(paste0("lgb.train() gives same results when using interaction_constrai params <- list( objective = "regression" , interaction_constraints = list(c(new_colnames[1L], new_colnames[2L]), new_colnames[3L]) - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) bst <- lightgbm( @@ -3015,7 +3003,7 @@ for (x3_to_categorical in c(TRUE, FALSE)) { , monotone_constraints = c(1L, -1L, 0L) , monotone_constraints_method = monotone_constraints_method , use_missing = FALSE - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) constrained_model <- lgb.train( @@ -3040,7 +3028,7 @@ test_that("lightgbm() accepts objective as function argument and under params", , label =
train$label , params = list(objective = "regression_l1", num_threads = .LGB_MAX_THREADS) , nrounds = 5L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY ) expect_equal(bst1$params$objective, "regression_l1") model_txt_lines <- strsplit( @@ -3056,7 +3044,7 @@ test_that("lightgbm() accepts objective as function argument and under params", , label = train$label , objective = "regression_l1" , nrounds = 5L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY ) expect_equal(bst2$params$objective, "regression_l1") model_txt_lines <- strsplit( @@ -3075,7 +3063,7 @@ test_that("lightgbm() prioritizes objective under params over objective as funct , objective = "regression" , params = list(objective = "regression_l1", num_threads = .LGB_MAX_THREADS) , nrounds = 5L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY ) expect_equal(bst1$params$objective, "regression_l1") model_txt_lines <- strsplit( @@ -3092,7 +3080,7 @@ test_that("lightgbm() prioritizes objective under params over objective as funct , objective = "regression" , params = list(loss = "regression_l1", num_threads = .LGB_MAX_THREADS) , nrounds = 5L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY ) expect_equal(bst2$params$objective, "regression_l1") model_txt_lines <- strsplit( @@ -3110,7 +3098,7 @@ test_that("lightgbm() accepts init_score as function argument", { , label = train$label , objective = "binary" , nrounds = 5L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , params = list(num_threads = .LGB_MAX_THREADS) ) pred1 <- predict(bst1, train$data, type = "raw") @@ -3121,7 +3109,7 @@ test_that("lightgbm() accepts init_score as function argument", { , init_score = pred1 , objective = "binary" , nrounds = 5L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , params = list(num_threads = .LGB_MAX_THREADS) ) pred2 <- predict(bst2, train$data, type = "raw") @@ -3134,7 +3122,7 @@ test_that("lightgbm() defaults to 'regression' objective if objective not otherw data = train$data , label = train$label , nrounds = 5L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , params = list(num_threads = .LGB_MAX_THREADS) ) expect_equal(bst$params$objective, "regression") @@ -3152,7 +3140,7 @@ test_that("lightgbm() accepts 'num_threads' as either top-level argument or unde data = train$data , label = train$label , nrounds = 5L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = 1L ) expect_equal(bst$params$num_threads, 1L) @@ -3167,7 +3155,7 @@ test_that("lightgbm() accepts 'num_threads' as either top-level argument or unde data = train$data , label = train$label , nrounds = 5L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , params = list(num_threads = 1L) ) expect_equal(bst$params$num_threads, 1L) @@ -3182,7 +3170,7 @@ test_that("lightgbm() accepts 'num_threads' as either top-level argument or unde data = train$data , label = train$label , nrounds = 5L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = 10L , params = list(num_threads = 1L) ) @@ -3206,7 +3194,7 @@ test_that("lightgbm() accepts 'weight' and 'weights'", { , weights = w , obj = "regression" , nrounds = 5L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , params = list( min_data_in_bin = 1L , min_data_in_leaf = 1L @@ -3262,7 +3250,7 @@ test_that("lightgbm() accepts 'weight' and 'weights'", { expect_equal( object = unlist(record_evals[["valid"]][["auc"]][["eval"]]) , expected = expected_valid_auc - , tolerance = TOLERANCE + , tolerance = .LGB_NUMERIC_TOLERANCE ) expect_named(record_evals, c("start_iter", "valid"), 
ignore.order = TRUE, ignore.case = FALSE) expect_equal(record_evals[["valid"]][["auc"]][["eval_err"]], list()) @@ -3667,7 +3655,7 @@ test_that("lightgbm() changes objective='auto' appropriately", { data("mtcars") y <- mtcars$mpg x <- as.matrix(mtcars[, -1L]) - model <- lightgbm(x, y, objective = "auto", verbose = VERBOSITY, nrounds = 5L, num_threads = .LGB_MAX_THREADS) + model <- lightgbm(x, y, objective = "auto", verbose = .LGB_VERBOSITY, nrounds = 5L, num_threads = .LGB_MAX_THREADS) expect_equal(model$params$objective, "regression") model_txt_lines <- strsplit( x = model$save_model_to_string() @@ -3680,7 +3668,7 @@ test_that("lightgbm() changes objective='auto' appropriately", { # Binary classification x <- train$data y <- factor(train$label) - model <- lightgbm(x, y, objective = "auto", verbose = VERBOSITY, nrounds = 5L, num_threads = .LGB_MAX_THREADS) + model <- lightgbm(x, y, objective = "auto", verbose = .LGB_VERBOSITY, nrounds = 5L, num_threads = .LGB_MAX_THREADS) expect_equal(model$params$objective, "binary") model_txt_lines <- strsplit( x = model$save_model_to_string() @@ -3693,7 +3681,7 @@ test_that("lightgbm() changes objective='auto' appropriately", { data("iris") y <- factor(iris$Species) x <- as.matrix(iris[, -5L]) - model <- lightgbm(x, y, objective = "auto", verbose = VERBOSITY, nrounds = 5L, num_threads = .LGB_MAX_THREADS) + model <- lightgbm(x, y, objective = "auto", verbose = .LGB_VERBOSITY, nrounds = 5L, num_threads = .LGB_MAX_THREADS) expect_equal(model$params$objective, "multiclass") expect_equal(model$params$num_class, 3L) model_txt_lines <- strsplit( @@ -3712,7 +3700,7 @@ test_that("lightgbm() determines number of classes for non-default multiclass ob x , y , objective = "multiclassova" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , nrounds = 5L , num_threads = .LGB_MAX_THREADS ) @@ -3731,7 +3719,7 @@ test_that("lightgbm() doesn't accept binary classification with non-binary facto y <- factor(iris$Species) x <- as.matrix(iris[, -5L]) expect_error({ - lightgbm(x, y, objective = "binary", verbose = VERBOSITY, nrounds = 5L, num_threads = .LGB_MAX_THREADS) + lightgbm(x, y, objective = "binary", verbose = .LGB_VERBOSITY, nrounds = 5L, num_threads = .LGB_MAX_THREADS) }, regexp = "Factors with >2 levels as labels only allowed for multi-class objectives") }) @@ -3742,7 +3730,7 @@ test_that("lightgbm() doesn't accept multi-class classification with binary fact y <- factor(y) x <- as.matrix(iris[, -5L]) expect_error({ - lightgbm(x, y, objective = "multiclass", verbose = VERBOSITY, nrounds = 5L, num_threads = .LGB_MAX_THREADS) + lightgbm(x, y, objective = "multiclass", verbose = .LGB_VERBOSITY, nrounds = 5L, num_threads = .LGB_MAX_THREADS) }, regexp = "Two-level factors as labels only allowed for objective='binary'") }) @@ -3750,7 +3738,7 @@ test_that("lightgbm() model predictions retain factor levels for multiclass clas data("iris") y <- factor(iris$Species) x <- as.matrix(iris[, -5L]) - model <- lightgbm(x, y, objective = "auto", verbose = VERBOSITY, nrounds = 5L, num_threads = .LGB_MAX_THREADS) + model <- lightgbm(x, y, objective = "auto", verbose = .LGB_VERBOSITY, nrounds = 5L, num_threads = .LGB_MAX_THREADS) pred <- predict(model, x, type = "class") expect_true(is.factor(pred)) @@ -3769,7 +3757,7 @@ test_that("lightgbm() model predictions retain factor levels for binary classifi y[y == "setosa"] <- "versicolor" y <- factor(y) x <- as.matrix(iris[, -5L]) - model <- lightgbm(x, y, objective = "auto", verbose = VERBOSITY, nrounds = 5L, num_threads = .LGB_MAX_THREADS) 
+ model <- lightgbm(x, y, objective = "auto", verbose = .LGB_VERBOSITY, nrounds = 5L, num_threads = .LGB_MAX_THREADS) pred <- predict(model, x, type = "class") expect_true(is.factor(pred)) diff --git a/R-package/tests/testthat/test_custom_objective.R b/R-package/tests/testthat/test_custom_objective.R index 85baacaf8cd3..2c10b9d571dc 100644 --- a/R-package/tests/testthat/test_custom_objective.R +++ b/R-package/tests/testthat/test_custom_objective.R @@ -1,15 +1,9 @@ -VERBOSITY <- as.integer( - Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1") -) - data(agaricus.train, package = "lightgbm") data(agaricus.test, package = "lightgbm") dtrain <- lgb.Dataset(agaricus.train$data, label = agaricus.train$label) dtest <- lgb.Dataset(agaricus.test$data, label = agaricus.test$label) watchlist <- list(eval = dtest, train = dtrain) -TOLERANCE <- 1e-6 - logregobj <- function(preds, dtrain) { labels <- get_field(dtrain, "label") preds <- 1.0 / (1.0 + exp(-preds)) @@ -38,7 +32,7 @@ param <- list( , learning_rate = 1.0 , objective = logregobj , metric = "auc" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) num_round <- 10L @@ -54,7 +48,7 @@ test_that("using a custom objective, custom eval, and no other metrics works", { params = list( num_leaves = 8L , learning_rate = 1.0 - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , data = dtrain @@ -65,11 +59,11 @@ test_that("using a custom objective, custom eval, and no other metrics works", { ) expect_false(is.null(bst$record_evals)) expect_equal(bst$best_iter, 4L) - expect_true(abs(bst$best_score - 0.000621) < TOLERANCE) + expect_true(abs(bst$best_score - 0.000621) < .LGB_NUMERIC_TOLERANCE) eval_results <- bst$eval_valid(feval = evalerror)[[1L]] expect_true(eval_results[["data_name"]] == "eval") - expect_true(abs(eval_results[["value"]] - 0.0006207325) < TOLERANCE) + expect_true(abs(eval_results[["value"]] - 0.0006207325) < .LGB_NUMERIC_TOLERANCE) expect_true(eval_results[["name"]] == "error") expect_false(eval_results[["higher_better"]]) }) @@ -81,7 +75,7 @@ test_that("using a custom objective that returns wrong shape grad or hess raises bad_hess <- function(preds, dtrain) { return(list(grad = rep(1.0, length(preds)), hess = numeric(0L))) } - params <- list(num_leaves = 3L, verbose = VERBOSITY) + params <- list(num_leaves = 3L, verbose = .LGB_VERBOSITY) expect_error({ lgb.train(params = params, data = dtrain, obj = bad_grad) }, sprintf("Expected custom objective function to return grad with length %d, got 0.", nrow(dtrain))) diff --git a/R-package/tests/testthat/test_dataset.R b/R-package/tests/testthat/test_dataset.R index 7ea95655cca2..cf68ce9262a3 100644 --- a/R-package/tests/testthat/test_dataset.R +++ b/R-package/tests/testthat/test_dataset.R @@ -1,7 +1,3 @@ -VERBOSITY <- as.integer( - Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1") -) - data(agaricus.train, package = "lightgbm") train_data <- agaricus.train$data[seq_len(1000L), ] train_label <- agaricus.train$label[seq_len(1000L)] @@ -16,7 +12,7 @@ test_that("lgb.Dataset: basic construction, saving, loading", { test_data , label = test_label , params = list( - verbose = VERBOSITY + verbose = .LGB_VERBOSITY ) ) # from dense matrix @@ -30,7 +26,7 @@ test_that("lgb.Dataset: basic construction, saving, loading", { dtest3 <- lgb.Dataset( tmp_file , params = list( - verbose = VERBOSITY + verbose = .LGB_VERBOSITY ) ) lgb.Dataset.construct(dtest3) @@ -376,7 +372,7 @@ test_that("lgb.Dataset: should be able to run lgb.train() immediately after usin 
data = test_data , label = test_label , params = list( - verbose = VERBOSITY + verbose = .LGB_VERBOSITY ) ) tmp_file <- tempfile(pattern = "lgb.Dataset_") @@ -393,7 +389,7 @@ test_that("lgb.Dataset: should be able to run lgb.train() immediately after usin , metric = "binary_logloss" , num_leaves = 5L , learning_rate = 1.0 - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) @@ -411,7 +407,7 @@ test_that("lgb.Dataset: should be able to run lgb.cv() immediately after using l data = test_data , label = test_label , params = list( - verbosity = VERBOSITY + verbosity = .LGB_VERBOSITY ) ) tmp_file <- tempfile(pattern = "lgb.Dataset_") @@ -429,7 +425,7 @@ test_that("lgb.Dataset: should be able to run lgb.cv() immediately after using l , num_leaves = 5L , learning_rate = 1.0 , num_iterations = 5L - , verbosity = VERBOSITY + , verbosity = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) @@ -475,7 +471,7 @@ test_that("lgb.Dataset: should be able to create a Dataset from a text file with data = train_file , params = list( header = TRUE - , verbosity = VERBOSITY + , verbosity = .LGB_VERBOSITY ) ) dtrain$construct() @@ -499,7 +495,7 @@ test_that("lgb.Dataset: should be able to create a Dataset from a text file with data = train_file , params = list( header = FALSE - , verbosity = VERBOSITY + , verbosity = .LGB_VERBOSITY ) ) dtrain$construct() diff --git a/R-package/tests/testthat/test_learning_to_rank.R b/R-package/tests/testthat/test_learning_to_rank.R index c237e78a2d37..6868794cf8ec 100644 --- a/R-package/tests/testthat/test_learning_to_rank.R +++ b/R-package/tests/testthat/test_learning_to_rank.R @@ -1,12 +1,3 @@ -VERBOSITY <- as.integer( - Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1") -) - -# numerical tolerance to use when checking metric values -TOLERANCE <- 1e-06 - -ON_32_BIT_WINDOWS <- .Platform$OS.type == "windows" && .Machine$sizeof.pointer != 8L - test_that("learning-to-rank with lgb.train() works as expected", { set.seed(708L) data(agaricus.train, package = "lightgbm") @@ -26,7 +17,7 @@ test_that("learning-to-rank with lgb.train() works as expected", { , ndcg_at = ndcg_at , lambdarank_truncation_level = 3L , learning_rate = 0.001 - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) model <- lgb.train( @@ -60,15 +51,15 @@ test_that("learning-to-rank with lgb.train() works as expected", { , eval_names ) expect_equal(eval_results[[1L]][["value"]], 0.775) - if (!ON_32_BIT_WINDOWS) { - expect_true(abs(eval_results[[2L]][["value"]] - 0.745986) < TOLERANCE) - expect_true(abs(eval_results[[3L]][["value"]] - 0.7351959) < TOLERANCE) + if (!.LGB_ON_32_BIT_WINDOWS) { + expect_true(abs(eval_results[[2L]][["value"]] - 0.745986) < .LGB_NUMERIC_TOLERANCE) + expect_true(abs(eval_results[[3L]][["value"]] - 0.7351959) < .LGB_NUMERIC_TOLERANCE) } }) test_that("learning-to-rank with lgb.cv() works as expected", { testthat::skip_if( - ON_32_BIT_WINDOWS + .LGB_ON_32_BIT_WINDOWS , message = "Skipping on 32-bit Windows" ) set.seed(708L) @@ -91,7 +82,7 @@ test_that("learning-to-rank with lgb.cv() works as expected", { , label_gain = "0,1,3" , min_data = 1L , learning_rate = 0.01 - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) nfold <- 4L @@ -115,7 +106,7 @@ test_that("learning-to-rank with lgb.cv() works as expected", { best_score <- cv_bst$best_score expect_true(best_iter > 0L && best_iter <= nrounds) expect_true(best_score > 0.0 && best_score < 1.0) - expect_true(abs(best_score - 0.75) < TOLERANCE) + 
expect_true(abs(best_score - 0.75) < .LGB_NUMERIC_TOLERANCE) # best_score should be set for the first metric first_metric <- eval_names[[1L]] @@ -138,19 +129,19 @@ test_that("learning-to-rank with lgb.cv() works as expected", { # first and last value of each metric should be as expected ndcg1_values <- c(0.675, 0.725, 0.65, 0.725, 0.75, 0.725, 0.75, 0.725, 0.75, 0.75) - expect_true(all(abs(unlist(eval_results[["ndcg@1"]][["eval"]]) - ndcg1_values) < TOLERANCE)) + expect_true(all(abs(unlist(eval_results[["ndcg@1"]][["eval"]]) - ndcg1_values) < .LGB_NUMERIC_TOLERANCE)) ndcg2_values <- c( 0.6556574, 0.6669721, 0.6306574, 0.6476294, 0.6629581, 0.6476294, 0.6629581, 0.6379581, 0.7113147, 0.6823008 ) - expect_true(all(abs(unlist(eval_results[["ndcg@2"]][["eval"]]) - ndcg2_values) < TOLERANCE)) + expect_true(all(abs(unlist(eval_results[["ndcg@2"]][["eval"]]) - ndcg2_values) < .LGB_NUMERIC_TOLERANCE)) ndcg3_values <- c( 0.6484639, 0.6571238, 0.6469279, 0.6540516, 0.6481857, 0.6481857, 0.6481857, 0.6466496, 0.7027939, 0.6629898 ) - expect_true(all(abs(unlist(eval_results[["ndcg@3"]][["eval"]]) - ndcg3_values) < TOLERANCE)) + expect_true(all(abs(unlist(eval_results[["ndcg@3"]][["eval"]]) - ndcg3_values) < .LGB_NUMERIC_TOLERANCE)) # check details of each booster for (bst in cv_bst$boosters) { diff --git a/R-package/tests/testthat/test_lgb.Booster.R b/R-package/tests/testthat/test_lgb.Booster.R index 1737614b1243..1ff038598db1 100644 --- a/R-package/tests/testthat/test_lgb.Booster.R +++ b/R-package/tests/testthat/test_lgb.Booster.R @@ -1,10 +1,3 @@ -VERBOSITY <- as.integer( - Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1") -) - -ON_WINDOWS <- .Platform$OS.type == "windows" -TOLERANCE <- 1e-6 - test_that("Booster$finalize() should not fail", { X <- as.matrix(as.integer(iris[, "Species"]), ncol = 1L) y <- iris[["Sepal.Length"]] @@ -15,7 +8,7 @@ test_that("Booster$finalize() should not fail", { objective = "regression" , num_threads = .LGB_MAX_THREADS ) - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , nrounds = 3L ) expect_true(lgb.is.Booster(bst)) @@ -66,7 +59,7 @@ test_that("lgb.get.eval.result() should throw an informative error for incorrect , metric = "l2" , min_data = 1L , learning_rate = 1.0 - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , data = dtrain @@ -101,7 +94,7 @@ test_that("lgb.get.eval.result() should throw an informative error for incorrect , metric = "l2" , min_data = 1L , learning_rate = 1.0 - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY ) , data = dtrain , nrounds = 5L @@ -135,7 +128,7 @@ test_that("lgb.load() gives the expected error messages given different incorrec objective = "binary" , num_leaves = 4L , learning_rate = 1.0 - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY ) , nrounds = 2L ) @@ -186,7 +179,7 @@ test_that("Loading a Booster from a text file works", { , metric = c("mape", "average_precision") , learning_rate = 1.0 , objective = "binary" - , verbosity = VERBOSITY + , verbosity = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) bst <- lightgbm( @@ -237,7 +230,7 @@ test_that("boosters with linear models at leaves can be written to text file and data = dtrain , nrounds = 10L , params = params - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY ) expect_true(lgb.is.Booster(bst)) @@ -271,7 +264,7 @@ test_that("Loading a Booster from a string works", { num_leaves = 4L , learning_rate = 1.0 , objective = "binary" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , nrounds = 2L @@ 
-307,7 +300,7 @@ test_that("Saving a large model to string should work", { , num_threads = .LGB_MAX_THREADS ) , nrounds = 500L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY ) pred <- predict(bst, train$data) @@ -351,7 +344,7 @@ test_that("Saving a large model to JSON should work", { , num_threads = .LGB_MAX_THREADS ) , nrounds = 200L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY ) model_json <- bst$dump_model() @@ -378,7 +371,7 @@ test_that("If a string and a file are both passed to lgb.load() the file is used num_leaves = 4L , learning_rate = 1.0 , objective = "binary" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , nrounds = 2L @@ -413,7 +406,7 @@ test_that("Creating a Booster from a Dataset should work", { bst <- Booster$new( params = list( objective = "binary" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ), train_set = dtrain @@ -435,7 +428,7 @@ test_that("Creating a Booster from a Dataset with an existing predictor should w num_leaves = 4L , learning_rate = 1.0 , objective = "binary" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , nrounds = nrounds @@ -449,7 +442,7 @@ test_that("Creating a Booster from a Dataset with an existing predictor should w bst_from_ds <- Booster$new( train_set = dtest , params = list( - verbose = VERBOSITY + verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) ) @@ -473,7 +466,7 @@ test_that("Booster$eval() should work on a Dataset stored in a binary file", { objective = "regression" , metric = "l2" , num_leaves = 4L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , data = dtrain @@ -504,14 +497,14 @@ test_that("Booster$eval() should work on a Dataset stored in a binary file", { eval_from_file <- bst$eval( data = lgb.Dataset( data = test_file - , params = list(verbose = VERBOSITY, num_threads = .LGB_MAX_THREADS) + , params = list(verbose = .LGB_VERBOSITY, num_threads = .LGB_MAX_THREADS) )$construct() , name = "test" ) - expect_true(abs(eval_in_mem[[1L]][["value"]] - 0.1744423) < TOLERANCE) + expect_true(abs(eval_in_mem[[1L]][["value"]] - 0.1744423) < .LGB_NUMERIC_TOLERANCE) # refer to https://github.com/microsoft/LightGBM/issues/4680 - if (isTRUE(ON_WINDOWS)) { + if (isTRUE(.LGB_ON_WINDOWS)) { expect_equal(eval_in_mem, eval_from_file) } else { expect_identical(eval_in_mem, eval_from_file) @@ -532,7 +525,7 @@ test_that("Booster$rollback_one_iter() should work as expected", { num_leaves = 4L , learning_rate = 1.0 , objective = "binary" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , nrounds = nrounds @@ -567,7 +560,7 @@ test_that("Booster$update() passing a train_set works as expected", { num_leaves = 4L , learning_rate = 1.0 , objective = "binary" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , nrounds = nrounds @@ -578,7 +571,7 @@ test_that("Booster$update() passing a train_set works as expected", { train_set = Dataset$new( data = agaricus.train$data , label = agaricus.train$label - , params = list(verbose = VERBOSITY) + , params = list(verbose = .LGB_VERBOSITY) ) ) expect_true(lgb.is.Booster(bst)) @@ -592,7 +585,7 @@ test_that("Booster$update() passing a train_set works as expected", { num_leaves = 4L , learning_rate = 1.0 , objective = "binary" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , nrounds = nrounds + 1L @@ -618,7 +611,7 @@ 
test_that("Booster$update() throws an informative error if you provide a non-Dat num_leaves = 4L , learning_rate = 1.0 , objective = "binary" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , nrounds = nrounds @@ -646,7 +639,7 @@ test_that("Booster should store parameters and Booster$reset_parameter() should , metric = c("multi_logloss", "multi_error") , boosting = "gbdt" , num_class = 5L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) bst <- Booster$new( @@ -674,7 +667,7 @@ test_that("Booster$params should include dataset params, before and after Booste objective = "binary" , max_depth = 4L , bagging_fraction = 0.8 - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) bst <- Booster$new( @@ -687,7 +680,7 @@ test_that("Booster$params should include dataset params, before and after Booste objective = "binary" , max_depth = 4L , bagging_fraction = 0.8 - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS , max_bin = 17L ) @@ -699,7 +692,7 @@ test_that("Booster$params should include dataset params, before and after Booste objective = "binary" , max_depth = 4L , bagging_fraction = 0.9 - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS , max_bin = 17L ) @@ -718,7 +711,7 @@ test_that("Saving a model with different feature importance types works", { num_leaves = 4L , learning_rate = 1.0 , objective = "binary" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , nrounds = 2L @@ -774,7 +767,7 @@ test_that("Saving a model with unknown importance type fails", { num_leaves = 4L , learning_rate = 1.0 , objective = "binary" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) , nrounds = 2L @@ -815,7 +808,7 @@ test_that("all parameters are stored correctly with save_model_to_string()", { ) , data = dtrain , nrounds = nrounds - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY ) model_str <- bst$save_model_to_string() @@ -832,7 +825,7 @@ test_that("all parameters are stored correctly with save_model_to_string()", { expect_equal(sum(params_in_file == "[objective: regression]"), 1L) expect_equal(sum(startsWith(params_in_file, "[verbosity:")), 1L) - expect_equal(sum(params_in_file == sprintf("[verbosity: %i]", VERBOSITY)), 1L) + expect_equal(sum(params_in_file == sprintf("[verbosity: %i]", .LGB_VERBOSITY)), 1L) # early stopping should be off by default expect_equal(sum(startsWith(params_in_file, "[early_stopping_round:")), 1L) @@ -879,7 +872,7 @@ test_that("early_stopping, num_iterations are stored correctly in model string e , valids = list( "random_valid" = dvalid ) - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY ) model_str <- bst$save_model_to_string() @@ -911,7 +904,7 @@ test_that("Booster: method calls Booster with a null handle should raise an info , num_threads = .LGB_MAX_THREADS ) , data = dtrain - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , nrounds = 5L , valids = list( train = dtrain @@ -987,7 +980,7 @@ test_that("Booster$new() using a Dataset with a null handle should raise an info bst <- Booster$new( train_set = dtrain , params = list( - verbose = VERBOSITY + verbose = .LGB_VERBOSITY ) ) }, regexp = "Attempting to create a Dataset without any raw data") @@ -1098,7 +1091,7 @@ test_that("lgb.cv() correctly handles passing through params to the model file", , n_iter = n_iter , early_stopping_round = early_stopping_round , 
n_iter_no_change = n_iter_no_change - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) @@ -1108,7 +1101,7 @@ test_that("lgb.cv() correctly handles passing through params to the model file", , nrounds = nrounds_kwarg , early_stopping_rounds = early_stopping_round_kwarg , nfold = 3L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY ) for (bst in cv_bst$boosters) { @@ -1143,7 +1136,7 @@ test_that("params (including dataset params) should be stored in .rds file for B objective = "binary" , max_depth = 4L , bagging_fraction = 0.8 - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) bst <- Booster$new( @@ -1160,7 +1153,7 @@ test_that("params (including dataset params) should be stored in .rds file for B objective = "binary" , max_depth = 4L , bagging_fraction = 0.8 - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS , max_bin = 17L ) @@ -1180,7 +1173,7 @@ test_that("params (including dataset params) should be stored in .rds file for B objective = "binary" , max_depth = 4L , bagging_fraction = 0.8 - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) bst <- Booster$new( @@ -1197,7 +1190,7 @@ test_that("params (including dataset params) should be stored in .rds file for B objective = "binary" , max_depth = 4L , bagging_fraction = 0.8 - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS , max_bin = 17L ) @@ -1212,7 +1205,7 @@ test_that("Handle is automatically restored when calling predict", { , nrounds = 5L , obj = "binary" , params = list( - verbose = VERBOSITY + verbose = .LGB_VERBOSITY ) , num_threads = .LGB_MAX_THREADS ) @@ -1236,7 +1229,7 @@ test_that("boosters with linear models at leaves work with saveRDS.lgb.Booster a params <- list( objective = "regression" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , metric = "mse" , seed = 0L , num_leaves = 2L @@ -1276,7 +1269,7 @@ test_that("boosters with linear models at leaves can be written to RDS and re-lo params <- list( objective = "regression" - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , metric = "mse" , seed = 0L , num_leaves = 2L @@ -1386,7 +1379,7 @@ test_that("Booster's print, show, and summary work correctly", { min_data_in_bin = 1L ) ) - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , nrounds = 5L ) .check_methods_work(model) @@ -1398,7 +1391,7 @@ test_that("Booster's print, show, and summary work correctly", { as.matrix(iris[, -5L]) , label = as.numeric(factor(iris$Species)) - 1.0 ) - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , nrounds = 5L ) .check_methods_work(model) @@ -1431,7 +1424,7 @@ test_that("Booster's print, show, and summary work correctly", { ) , obj = .logregobj , eval = .evalerror - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , nrounds = 5L , params = list(num_threads = .LGB_MAX_THREADS) ) @@ -1454,7 +1447,7 @@ test_that("LGBM_BoosterGetNumFeature_R returns correct outputs", { min_data_in_bin = 1L ) ) - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , nrounds = 5L ) ncols <- .Call(LGBM_BoosterGetNumFeature_R, model$.__enclos_env__$private$handle) @@ -1467,7 +1460,7 @@ test_that("LGBM_BoosterGetNumFeature_R returns correct outputs", { as.matrix(iris[, -5L]) , label = as.numeric(factor(iris$Species)) - 1.0 ) - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , nrounds = 5L ) ncols <- .Call(LGBM_BoosterGetNumFeature_R, model$.__enclos_env__$private$handle) diff --git 
a/R-package/tests/testthat/test_lgb.interprete.R b/R-package/tests/testthat/test_lgb.interprete.R index 45fc78002fea..322a80a55bc5 100644 --- a/R-package/tests/testthat/test_lgb.interprete.R +++ b/R-package/tests/testthat/test_lgb.interprete.R @@ -1,7 +1,3 @@ -VERBOSITY <- as.integer( - Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1") -) - .sigmoid <- function(x) { 1.0 / (1.0 + exp(-x)) } @@ -30,7 +26,7 @@ test_that("lgb.intereprete works as expected for binary classification", { , max_depth = -1L , min_data_in_leaf = 1L , min_sum_hessian_in_leaf = 1.0 - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) model <- lgb.train( @@ -83,7 +79,7 @@ test_that("lgb.intereprete works as expected for multiclass classification", { , num_class = 3L , learning_rate = 0.00001 , min_data = 1L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) model <- lgb.train( diff --git a/R-package/tests/testthat/test_lgb.plot.importance.R b/R-package/tests/testthat/test_lgb.plot.importance.R index a0a9bd505c84..e7ff63facde5 100644 --- a/R-package/tests/testthat/test_lgb.plot.importance.R +++ b/R-package/tests/testthat/test_lgb.plot.importance.R @@ -1,7 +1,3 @@ -VERBOSITY <- as.integer( - Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1") -) - test_that("lgb.plot.importance() should run without error for well-formed inputs", { data(agaricus.train, package = "lightgbm") train <- agaricus.train @@ -13,7 +9,7 @@ test_that("lgb.plot.importance() should run without error for well-formed inputs , max_depth = -1L , min_data_in_leaf = 1L , min_sum_hessian_in_leaf = 1.0 - , verbosity = VERBOSITY + , verbosity = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) model <- lgb.train(params, dtrain, 3L) diff --git a/R-package/tests/testthat/test_lgb.plot.interpretation.R b/R-package/tests/testthat/test_lgb.plot.interpretation.R index fbad01cce1d3..6cba9927942a 100644 --- a/R-package/tests/testthat/test_lgb.plot.interpretation.R +++ b/R-package/tests/testthat/test_lgb.plot.interpretation.R @@ -1,7 +1,3 @@ -VERBOSITY <- as.integer( - Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1") -) - .sigmoid <- function(x) { 1.0 / (1.0 + exp(-x)) } @@ -30,7 +26,7 @@ test_that("lgb.plot.interepretation works as expected for binary classification" , max_depth = -1L , min_data_in_leaf = 1L , min_sum_hessian_in_leaf = 1.0 - , verbosity = VERBOSITY + , verbosity = .LGB_VERBOSITY , num_threads = .LGB_MAX_THREADS ) model <- lgb.train( @@ -87,7 +83,7 @@ test_that("lgb.plot.interepretation works as expected for multiclass classificat params = params , data = dtrain , nrounds = 3L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY ) num_trees <- 5L tree_interpretation <- lgb.interprete( diff --git a/R-package/tests/testthat/test_weighted_loss.R b/R-package/tests/testthat/test_weighted_loss.R index 6ea864ee08a9..f9f9675c3bb9 100644 --- a/R-package/tests/testthat/test_weighted_loss.R +++ b/R-package/tests/testthat/test_weighted_loss.R @@ -1,7 +1,3 @@ -VERBOSITY <- as.integer( - Sys.getenv("LIGHTGBM_TEST_VERBOSITY", "-1") -) - test_that("Gamma regression reacts on 'weight'", { n <- 100L set.seed(87L) @@ -17,7 +13,7 @@ test_that("Gamma regression reacts on 'weight'", { params = params , data = dtrain , nrounds = 4L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY ) pred_unweighted <- predict(bst, X_pred) @@ -31,7 +27,7 @@ test_that("Gamma regression reacts on 'weight'", { params = params , data = dtrain , nrounds = 4L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY ) pred_weighted_1 <- 
predict(bst, X_pred) @@ -45,7 +41,7 @@ test_that("Gamma regression reacts on 'weight'", { params = params , data = dtrain , nrounds = 4L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY ) pred_weighted_2 <- predict(bst, X_pred) @@ -59,7 +55,7 @@ test_that("Gamma regression reacts on 'weight'", { params = params , data = dtrain , nrounds = 4L - , verbose = VERBOSITY + , verbose = .LGB_VERBOSITY ) pred_weighted <- predict(bst, X_pred) From 665c47313d6938a8d80559c824073a142c8bd870 Mon Sep 17 00:00:00 2001 From: James Lamb Date: Sun, 23 Jul 2023 22:37:11 -0500 Subject: [PATCH 5/8] [ci] simplify CODEOWNERS (#5998) --- .github/CODEOWNERS | 54 +--------------------------------------------- 1 file changed, 1 insertion(+), 53 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index ce6da9f6e7fb..02b5cfbdae23 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -7,56 +7,4 @@ # offer a reasonable automatic best-guess # catch-all rule (this only gets matched if no rules below match) -* @guolinke @StrikerRUS @jameslamb @shiyu1994 - -# other catch-alls that will get matched if specific rules below are not matched -*.R @jameslamb @jmoralez -*.py @StrikerRUS @jmoralez @jameslamb @shiyu1994 -*.cpp @guolinke @shiyu1994 -*.h @guolinke @shiyu1994 - -# main C++ code -include/ @guolinke @shiyu1994 -src/ @guolinke @shiyu1994 -CMakeLists.txt @guolinke @jameslamb @StrikerRUS @shiyu1994 -tests/c_api_test/ @guolinke @shiyu1994 -tests/cpp_tests/ @guolinke @shiyu1994 -tests/data/ @guolinke @shiyu1994 -windows/ @guolinke @StrikerRUS @shiyu1994 - -# R code -build_r.R @jameslamb @StrikerRUS @jmoralez -build-cran-package.sh @jameslamb @StrikerRUS @jmoralez -R-package/ @jameslamb @jmoralez - -# Python code -python-package/ @StrikerRUS @shiyu1994 @jameslamb @jmoralez - -# Dask integration -python-package/lightgbm/dask.py @jameslamb @jmoralez -tests/python_package_test/test_dask.py @jameslamb @jmoralez - -# helpers -helpers/ @StrikerRUS @guolinke - -# CI administrative stuff -.ci/ @StrikerRUS @jameslamb -docs/ @StrikerRUS @jameslamb -examples/ @StrikerRUS @jameslamb @guolinke @jmoralez -*.yml @StrikerRUS @jameslamb -.vsts-ci.yml @StrikerRUS @jameslamb - -# docker setup -docker/ @StrikerRUS @jameslamb -docker/dockerfile-cli @guolinke @shiyu1994 @StrikerRUS @jameslamb -docker/gpu/ @StrikerRUS @jameslamb -docker/dockerfile-python @StrikerRUS @shiyu1994 @jameslamb @jmoralez -docker/dockerfile-r @jameslamb @jmoralez - -# GPU code -docs/GPU-*.rst @shiyu1994 @guolinke -src/treelearner/gpu_tree_learner.cpp @guolinke @shiyu1994 -src/treelearner/tree_learner.cpp @guolinke @shiyu1994 - -# JAVA code -swig/ @guolinke @shiyu1994 +* @guolinke @jameslamb @shiyu1994 @jmoralez From 170a93044bf7c8c3d90d64b4ceeb21d3c8bf1fbb Mon Sep 17 00:00:00 2001 From: david-cortes Date: Fri, 4 Aug 2023 17:40:57 +0200 Subject: [PATCH 6/8] [R-package] Fix error when passing categorical features to lightgbm() (fixes #6000) (#6003) --- R-package/R/lgb.train.R | 8 +++----- R-package/tests/testthat/test_basic.R | 15 +++++++++++++++ 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/R-package/R/lgb.train.R b/R-package/R/lgb.train.R index 4260f81cd3fe..20916c9844b5 100644 --- a/R-package/R/lgb.train.R +++ b/R-package/R/lgb.train.R @@ -154,6 +154,9 @@ lgb.train <- function(params = list(), # Construct datasets, if needed data$update_params(params = params) + if (!is.null(categorical_feature)) { + data$set_categorical_feature(categorical_feature) + } data$construct() # Check interaction constraints @@ -179,11 +182,6 @@ lgb.train 
<- function(params = list(), data$set_colnames(colnames) } - # Write categorical features - if (!is.null(categorical_feature)) { - data$set_categorical_feature(categorical_feature) - } - valid_contain_train <- FALSE train_data_name <- "train" reduced_valid_sets <- list() diff --git a/R-package/tests/testthat/test_basic.R b/R-package/tests/testthat/test_basic.R index b0253b1e488e..80391089786b 100644 --- a/R-package/tests/testthat/test_basic.R +++ b/R-package/tests/testthat/test_basic.R @@ -3773,3 +3773,18 @@ test_that("lightgbm() model predictions retain factor levels for binary classifi expect_true(is.numeric(pred)) expect_false(any(pred %in% y)) }) + +test_that("lightgbm() accepts named categorical_features", { + data(mtcars) + y <- mtcars$mpg + x <- as.matrix(mtcars[, -1L]) + model <- lightgbm( + x + , y + , categorical_feature = "cyl" + , verbose = .LGB_VERBOSITY + , nrounds = 5L + , num_threads = .LGB_MAX_THREADS + ) + expect_true(length(model$params$categorical_feature) > 0L) +}) From 3fe137272fb846058e75dbcb0ae4ed1390ed8234 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Morales?= Date: Fri, 4 Aug 2023 10:22:30 -0600 Subject: [PATCH 7/8] [python-package] replace np.find_common_type with np.result_type (#5999) --- python-package/lightgbm/basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python-package/lightgbm/basic.py b/python-package/lightgbm/basic.py index caa71fed47e5..606dccefa6f7 100644 --- a/python-package/lightgbm/basic.py +++ b/python-package/lightgbm/basic.py @@ -699,7 +699,7 @@ def _data_from_pandas( _check_for_bad_pandas_dtypes(data.dtypes) df_dtypes = [dtype.type for dtype in data.dtypes] df_dtypes.append(np.float32) # so that the target dtype considers floats - target_dtype = np.find_common_type(df_dtypes, []) + target_dtype = np.result_type(*df_dtypes) try: # most common case (no nullable dtypes) data = data.to_numpy(dtype=target_dtype, copy=False) From 20975badd7ea3083c89f08a1f4dfda1a9789f6bc Mon Sep 17 00:00:00 2001 From: James Lamb Date: Fri, 4 Aug 2023 14:02:09 -0500 Subject: [PATCH 8/8] [ci] [R-package] use {lintr} 3.1 (#5997) --- .ci/lint_r_code.R | 20 +++++++++++++++----- .ci/test.sh | 2 +- R-package/R/lgb.interprete.R | 4 +++- 3 files changed, 19 insertions(+), 7 deletions(-) diff --git a/.ci/lint_r_code.R b/.ci/lint_r_code.R index c13471ca8fb1..12116104ef6d 100755 --- a/.ci/lint_r_code.R +++ b/.ci/lint_r_code.R @@ -33,29 +33,37 @@ LINTERS_TO_USE <- list( , "any_duplicated" = lintr::any_duplicated_linter() , "any_is_na" = lintr::any_is_na_linter() , "assignment" = lintr::assignment_linter() + , "boolean_arithmetic" = lintr::boolean_arithmetic_linter() , "braces" = lintr::brace_linter() , "class_equals" = lintr::class_equals_linter() , "commas" = lintr::commas_linter() , "duplicate_argument" = lintr::duplicate_argument_linter() + , "empty_assignment" = lintr::empty_assignment_linter() , "equals_na" = lintr::equals_na_linter() + , "for_loop_index" = lintr::for_loop_index_linter() , "function_left" = lintr::function_left_parentheses_linter() , "implicit_integers" = lintr::implicit_integer_linter() , "infix_spaces" = lintr::infix_spaces_linter() , "inner_combine" = lintr::inner_combine_linter() + , "is_numeric" = lintr::is_numeric_linter() , "fixed_regex" = lintr::fixed_regex_linter() + , "function_return" = lintr::function_return_linter() + , "lengths" = lintr::lengths_linter() , "literal_coercion" = lintr::literal_coercion_linter() , "long_lines" = lintr::line_length_linter(length = 120L) + , "matrix" = lintr::matrix_apply_linter() , 
"missing_argument" = lintr::missing_argument_linter() - , "no_tabs" = lintr::no_tab_linter() , "non_portable_path" = lintr::nonportable_path_linter() , "numeric_leading_zero" = lintr::numeric_leading_zero_linter() , "outer_negation" = lintr::outer_negation_linter() , "package_hooks" = lintr::package_hooks_linter() , "paste" = lintr::paste_linter() + , "quotes" = lintr::quotes_linter() + , "redundant_equals" = lintr::redundant_equals_linter() , "regex_subset" = lintr::regex_subset_linter() + , "routine_registration" = lintr::routine_registration_linter() , "semicolon" = lintr::semicolon_linter() , "seq" = lintr::seq_linter() - , "single_quotes" = lintr::single_quotes_linter() , "spaces_inside" = lintr::spaces_inside_linter() , "spaces_left_parens" = lintr::spaces_left_parentheses_linter() , "sprintf" = lintr::sprintf_linter() @@ -96,9 +104,11 @@ LINTERS_TO_USE <- list( , "??" = interactive_text ) ) - , "unneeded_concatenation" = lintr::unneeded_concatenation_linter() - , "unreachable_code" = lintr::unreachable_code_linter() - , "vector_logic" = lintr::vector_logic_linter() + , "unnecessary_concatenation" = lintr::unnecessary_concatenation_linter() + , "unnecessary_lambda" = lintr::unnecessary_lambda_linter() + , "unreachable_code" = lintr::unreachable_code_linter() + , "vector_logic" = lintr::vector_logic_linter() + , "whitespace" = lintr::whitespace_linter() ) noquote(paste0(length(FILES_TO_LINT), " R files need linting")) diff --git a/.ci/test.sh b/.ci/test.sh index 665e7f6546ec..b3acc4a670cf 100755 --- a/.ci/test.sh +++ b/.ci/test.sh @@ -73,7 +73,7 @@ if [[ $TASK == "lint" ]]; then cpplint \ isort \ mypy \ - 'r-lintr>=3.0' \ + 'r-lintr>=3.1' \ ruff source activate $CONDA_ENV echo "Linting Python code" diff --git a/R-package/R/lgb.interprete.R b/R-package/R/lgb.interprete.R index e0de0b781079..7de772664d8b 100644 --- a/R-package/R/lgb.interprete.R +++ b/R-package/R/lgb.interprete.R @@ -72,7 +72,9 @@ lgb.interprete <- function(model, leaf_index_dt <- data.table::as.data.table(x = pred_mat) leaf_index_mat_list <- lapply( X = leaf_index_dt - , FUN = function(x) matrix(x, ncol = num_class, byrow = TRUE) + , FUN = matrix + , ncol = num_class + , byrow = TRUE ) # Get list of trees