diff --git a/.bazelrc b/.bazelrc
index d749968a..31c3c447 100644
--- a/.bazelrc
+++ b/.bazelrc
@@ -36,6 +36,7 @@
# Common flags.
common --experimental_repo_remote_exec
+common --incompatible_restrict_string_escapes=false

build -c opt
build --spawn_strategy=standalone
diff --git a/.bazelversion b/.bazelversion
deleted file mode 100644
index 47b6be3f..00000000
--- a/.bazelversion
+++ /dev/null
@@ -1 +0,0 @@
-3.7.2
\ No newline at end of file
diff --git a/WORKSPACE b/WORKSPACE
index 3c730e0e..22a66e8b 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -17,8 +17,10 @@ tensorflow()
farmhash()
boost()

-# TensorFlow cannot anymore be injected from a sub-module.
-# Note: TensorFlow is used to read and write TFRecord and IO if
+load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps")
+protobuf_deps()
+
+# TensorFlow is used to read and write TFRecord and IO if
# use_tensorflow_io=1. Only a small fraction of TF will be compiled.
load("@org_tensorflow//tensorflow:workspace3.bzl", tf1="workspace")
tf1()
diff --git a/documentation/developer_manual.md b/documentation/developer_manual.md
index 4b4166a9..c9b2b152 100644
--- a/documentation/developer_manual.md
+++ b/documentation/developer_manual.md
@@ -12,7 +12,7 @@
* [How to test the code](#how-to-test-the-code)
* [Models and Learners](#models-and-learners)

-
+
diff --git a/documentation/installation.md b/documentation/installation.md
index 04146b81..732a9ff0 100644
--- a/documentation/installation.md
+++ b/documentation/installation.md
@@ -12,11 +12,13 @@ interfaces.
* [Table of Contents](#table-of-contents)
* [Installation pre-compiled command-line-interface](#installation-pre-compiled-command-line-interface)
* [Compile command-line-interface from source](#compile-command-line-interface-from-source)
+  * [Linux](#linux)
+  * [Windows](#windows)
* [Running a minimal example](#running-a-minimal-example)
* [Using the C++ library](#using-the-c-library)
* [Troubleshooting](#troubleshooting)

-
+

@@ -29,43 +31,75 @@ Pre-compiled binaries are available as

**Requirements**

-- MSVC>=2019 (Windows) or GCC>=7.3 (Linux).
-- Bazel 3.7.2
-- Python 3.x
+- Microsoft Visual Studio >= 2019 (Windows)
+- GCC >= 8 (>= 9 recommended) or Clang (Linux)
+- Bazel >= 3.7.2
+- Python >= 3
- Git
- Python's numpy
- MSYS2 (Windows only)

-First install Bazel. Currently, the version
-[3.7 of Bazel](https://docs.bazel.build/versions/3.7.0/getting-started.html) is
-required:
+First install [Bazel](https://docs.bazel.build). Versions 3.7.2 and 4.0.0 are
+supported:

-- On linux: `sudo apt update && sudo apt install bazel-3.7.2`
+- On Linux: `sudo apt update && sudo apt install bazel`
- On Windows: Follow
-  [the guide](https://docs.bazel.build/versions/3.7.0/install-windows.html).
+  [the guide](https://docs.bazel.build/versions/4.0.0/install-windows.html).

For more details (and troubleshooting), see the
-[Bazel installation guide](https://docs.bazel.build/versions/3.7.0/install.html).
+[Bazel installation guide](https://docs.bazel.build/versions/4.0.0/install.html). 
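+To confirm which version is active before building (`bazel --version` is a
+standard Bazel command):
+
+```shell
+# Should print 3.7.2 or 4.0.0.
+bazel --version
+```
+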
Once Bazel is installed, clone the GitHub repository and start the compilation:

```shell
git clone https://github.com/google/yggdrasil-decision-forests.git
cd yggdrasil-decision-forests
-bazel-3.7.2 build //yggdrasil_decision_forests/...:all --config=
+
+bazel build //yggdrasil_decision_forests/...:all --config=
```

For example:

+### Linux
+
+```shell
+git clone https://github.com/google/yggdrasil-decision-forests.git
+cd yggdrasil-decision-forests
+
+bazel build //yggdrasil_decision_forests/cli/...:all --config=linux_cpp17 --config=linux_avx2
+```
+
+*Note:* You can specify the compiler with `--repo_env=CC`. For example:
+
+```shell
+# Compile with GCC9
+... --repo_env=CC=gcc-9
+
+# Compile with Clang
+... --repo_env=CC=clang
+```
+
+### Windows
+
```shell
-
+# Note: The python path should not contain spaces.
+set PYTHON_BIN_PATH=C:\Python38\python.exe
+
+git clone https://github.com/google/yggdrasil-decision-forests.git
+cd yggdrasil-decision-forests
+
+bazel build //yggdrasil_decision_forests/cli/...:all --config=windows_cpp17 --config=windows_avx2
```

-or
+*Note:* If multiple versions of Visual Studio are installed, use
+`BAZEL_VC_FULL_VERSION`. For example:

```shell
-bazel-3.7.2 build //yggdrasil_decision_forests/cli/...:all --config=windows_cpp17
+# Set a specific version of Visual Studio.
+# The exact version can be found in `Program Files (x86)` e.g. C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.28.29910.
+set BAZEL_VC_FULL_VERSION=14.28.29910
```

**Important remarks**

@@ -173,6 +207,9 @@ void f() {
}
```

+An example is available at [examples/beginner.cc](../examples/beginner.cc) and
+[examples/BUILD](../examples/BUILD).
+
## Troubleshooting

**`Time out` during building**

@@ -215,3 +252,15 @@ make sure to use a recent version.

Bazel calls `python` during the compilation. Check which version of python you
have available and create an alias `sudo ln -s /usr/bin/python3
/usr/bin/python`.
+
+**[Windows] `fatal error LNK1120: 6 unresolved externals`
+`yggdrasil_decision_forests::serving::decision_forest::Idendity`**
+
+You are using an unsupported version of Visual Studio. Install VS>=2019
+(VS>=14). If multiple versions of VS are installed, specify the one used by
+Bazel with `BAZEL_VC_FULL_VERSION`.
+
+**Segmentation fault when any program starts on `std::filesystem::~path()`**
+
+`stdc++fs` is not linked. You are likely using GCC8 without TensorFlow. Update
+to GCC>=9 or use TensorFlow for IO (`--config=use_tensorflow_io`).
diff --git a/documentation/learners.md b/documentation/learners.md
index d5dda8f4..c1dc07ae 100644
--- a/documentation/learners.md
+++ b/documentation/learners.md
@@ -17,8 +17,8 @@ the gradient of the loss relative to the model output).
### Training configuration

- learner/abstract_learner.proto
+- learner/decision_tree/decision_tree.proto
- learner/gradient_boosted_trees/gradient_boosted_trees.proto
-- model/decision_tree/decision_tree.proto

### Generic Hyper-parameters (compatible with TensorFlow Decision Forests)

@@ -31,21 +31,21 @@ the gradient of the loss relative to the model output).
  datasets used to train individual trees are adapted dynamically so that all
  the trees are trained in time. 
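+
+The hyper-parameter names and values listed in this section are the generic
+ones accepted by `AbstractLearner::SetHyperParameters`. An illustrative sketch
+in proto text format (field names match the unit test later in this change;
+the values here are hypothetical; the message is
+`model::proto::GenericHyperParameters`):
+
+```proto
+# Sets three of the generic hyper-parameters described in this section.
+fields {
+  name: "num_trees"
+  value { integer: 200 }
+}
+fields {
+  name: "early_stopping"
+  value { categorical: "LOSS_INCREASE" }
+}
+fields {
+  name: "validation_ratio"
+  value { real: 0.2 }
+}
+```
+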
-#### [allow_na_conditions](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:allow_na_conditions) +#### [allow_na_conditions](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:allow_na_conditions) - **Type:** Categorical **Default:** false **Possible values:** true, false - If true, the tree training evaluates conditions of the type `X is NA` i.e. `X is missing`. -#### [categorical_algorithm](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:categorical_algorithm) +#### [categorical_algorithm](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:categorical_algorithm) - **Type:** Categorical **Default:** CART **Possible values:** CART, ONE_HOT, RANDOM - How to learn splits on categorical attributes.
- `CART`: CART algorithm. Find categorical splits of the form "value \in mask". The solution is exact for binary classification, regression and ranking. It is approximated for multi-class classification. This is a good first algorithm to use. In case of overfitting (very small dataset, large dictionary), the "random" algorithm is a good alternative.
- `ONE_HOT`: One-hot encoding. Find the optimal categorical split of the form "attribute == param". This method is similar to (but more efficient than) converting each possible categorical value into a boolean feature. This method is available for comparison purposes and generally performs worse than other alternatives.
- `RANDOM`: Best split among a set of random candidates. Find a categorical split of the form "value \in mask" using a random search. This solution can be seen as an approximation of the CART algorithm. This method is a strong alternative to CART. This algorithm is inspired by section "5.1 Categorical Variables" of "Random Forest", 2001.

-#### [categorical_set_split_greedy_sampling](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:categorical_set_greedy_forward)
+#### [categorical_set_split_greedy_sampling](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:categorical_set_greedy_forward)

- **Type:** Real **Default:** 0.1 **Possible values:** min:0 max:1

@@ -53,7 +53,7 @@ the gradient of the loss relative to the model output).
  to be a candidate for the positive set. The sampling is applied once per node
  (i.e. not at every step of the greedy optimization).

-#### [categorical_set_split_max_num_items](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:max_num_items)
+#### [categorical_set_split_max_num_items](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:max_num_items)

- **Type:** Integer **Default:** -1 **Possible values:** min:-1

@@ -64,7 +64,7 @@ the gradient of the loss relative to the model output).
  `max_vocab_count`, all the remaining items are grouped in a special
  Out-of-vocabulary item. With `max_num_items`, this is not the case.

-#### [categorical_set_split_min_item_frequency](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:min_item_frequency)
+#### [categorical_set_split_min_item_frequency](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:min_item_frequency)

- **Type:** Integer **Default:** 1 **Possible values:** min:1

@@ -77,6 +77,20 @@ the gradient of the loss relative to the model output).

- Dropout rate applied when using DART, i.e. when forest_extraction=DART.

+#### [early_stopping](../yggdrasil_decision_forests/learner/gradient_boosted_trees/gradient_boosted_trees.proto?q=symbol:early_stopping)
+
+- **Type:** Categorical **Default:** LOSS_INCREASE **Possible values:** NONE,
+  MIN_LOSS_FINAL, LOSS_INCREASE
+
+- Early stopping detects the overfitting of the model and halts its training using the validation dataset controlled by `validation_ratio`.
- `NONE`: No early stopping. The model is trained entirely.
- `MIN_LOSS_FINAL`: No early stopping. However, the model is then truncated to minimize the validation loss.
- `LOSS_INCREASE`: Stop the training when the validation loss does not decrease for `early_stopping_num_trees_look_ahead` trees.

+#### [early_stopping_num_trees_look_ahead](../yggdrasil_decision_forests/learner/gradient_boosted_trees/gradient_boosted_trees.proto?q=symbol:early_stopping_num_trees_look_ahead)
+
+- **Type:** Integer **Default:** 30 **Possible values:** min:1
+
+- Rolling number of trees used to detect validation loss increase and trigger
+  early stopping.
+
#### [forest_extraction](../yggdrasil_decision_forests/learner/gradient_boosted_trees/gradient_boosted_trees.proto?q=symbol:forest_extraction)

- **Type:** Categorical **Default:** MART **Possible values:** MART, DART

@@ -98,14 +112,14 @@ the gradient of the loss relative to the model output).
  - Beta parameter for the GOSS (Gradient-based One-Side Sampling) sampling
    method.

-#### [growing_strategy](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:growing_strategy)
+#### [growing_strategy](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:growing_strategy)

- **Type:** Categorical **Default:** LOCAL **Possible values:** LOCAL,
  BEST_FIRST_GLOBAL

- How to grow the tree.
- `LOCAL`: Each node is split independently of the other nodes. In other words, as long as a node satisfies the split constraints (e.g. maximum depth, minimum number of observations), the node will be split. This is the "classical" way to grow decision trees.
- `BEST_FIRST_GLOBAL`: The node with the best loss reduction among all the nodes of the tree is selected for splitting. This method is also called "best first" or "leaf-wise growth". See "Best-first decision tree learning", Shi and "Additive logistic regression: A statistical view of boosting", Friedman for more details.

-#### [in_split_min_examples_check](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:in_split_min_examples_check)
+#### [in_split_min_examples_check](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:in_split_min_examples_check)

- **Type:** Categorical **Default:** true **Possible values:** true, false

@@ -143,14 +157,14 @@ the gradient of the loss relative to the model output).
  - Lambda regularization applied to certain training loss functions. Only for
    NDCG loss.

-#### [max_depth](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:max_depth)
+#### [max_depth](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:max_depth)

- **Type:** Integer **Default:** 6 **Possible values:** min:-1

- Maximum depth of the tree. `max_depth=1` means that all trees will be roots.
  Negative values are ignored.

-#### [max_num_nodes](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:max_num_nodes)
+#### [max_num_nodes](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:max_num_nodes)

- **Type:** Integer **Default:** 31 **Possible values:** min:-1

@@ -165,20 +179,20 @@ the gradient of the loss relative to the model output).
  algorithm is free to use this parameter as it sees fit. Enabling maximum
  training duration makes the model training non-deterministic.

-#### [min_examples](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:min_examples)
+#### [min_examples](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:min_examples)

- **Type:** Integer **Default:** 5 **Possible values:** min:1

- Minimum number of examples in a node.

-#### [missing_value_policy](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:missing_value_policy)
+#### [missing_value_policy](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:missing_value_policy)

- **Type:** Categorical **Default:** GLOBAL_IMPUTATION **Possible values:**
  GLOBAL_IMPUTATION, LOCAL_IMPUTATION, RANDOM_LOCAL_IMPUTATION

- Method used to handle missing attribute values.
- `GLOBAL_IMPUTATION`: Missing attribute values are imputed with the mean (in case of numerical attribute) or the most-frequent-item (in case of categorical attribute) computed on the entire dataset (i.e. the information contained in the data spec).
- `LOCAL_IMPUTATION`: Missing attribute values are imputed with the mean (in case of numerical attribute) or the most-frequent-item (in case of categorical attribute) evaluated on the training examples in the current node.
- `RANDOM_LOCAL_IMPUTATION`: Missing attribute values are imputed from values randomly sampled from the training examples in the current node. This method was proposed by Ishwaran et al. in "Random Survival Forests" (https://projecteuclid.org/download/pdfview_1/euclid.aoas/1223908043).

-#### [num_candidate_attributes](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:num_candidate_attributes)
+#### [num_candidate_attributes](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:num_candidate_attributes)

- **Type:** Integer **Default:** -1 **Possible values:** min:-1

@@ -189,7 +203,7 @@ the gradient of the loss relative to the model output).
  `number_of_input_attributes / 3` in case of regression. If
  `num_candidate_attributes=-1`, all the attributes are tested.

-#### [num_candidate_attributes_ratio](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:num_candidate_attributes_ratio)
+#### [num_candidate_attributes_ratio](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:num_candidate_attributes_ratio)

- **Type:** Real **Default:** -1 **Possible values:** min:-1 max:1

@@ -231,14 +245,14 @@ the gradient of the loss relative to the model output).
  give more accurate results (assuming enough trees are trained), but results
  in larger models. Analogous to neural network learning rate.

-#### [sparse_oblique_normalization](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:sparse_oblique_split)
+#### [sparse_oblique_normalization](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:sparse_oblique_split)

- **Type:** Categorical **Default:** NONE **Possible values:** NONE,
  STANDARD_DEVIATION, MIN_MAX

- For sparse oblique splits i.e. `split_axis=SPARSE_OBLIQUE`. Normalization applied on the features, before applying the sparse oblique projections.
- `NONE`: No normalization.
- `STANDARD_DEVIATION`: Normalize the feature by the estimated standard deviation on the entire train dataset. Also known as Z-Score normalization.
- `MIN_MAX`: Normalize the feature by the range (i.e. max-min) estimated on the entire train dataset.

-#### [sparse_oblique_num_projections_exponent](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:num_projections_exponent)
+#### [sparse_oblique_num_projections_exponent](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:num_projections_exponent)

- **Type:** Real **Default:** 2 **Possible values:** min:0

@@ -246,7 +260,7 @@ the gradient of the loss relative to the model output).
  number of random projections to test at each node as
  `num_features^num_projections_exponent`.

-#### [sparse_oblique_projection_density_factor](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:projection_density_factor)
+#### [sparse_oblique_projection_density_factor](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:projection_density_factor)

- **Type:** Real **Default:** 2 **Possible values:** min:0

@@ -254,7 +268,7 @@ the gradient of the loss relative to the model output).
  number of random projections to test at each node as
  `num_features^num_projections_exponent`.

-#### [split_axis](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:split_axis)
+#### [split_axis](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:split_axis)

- **Type:** Categorical **Default:** AXIS_ALIGNED **Possible values:**
  AXIS_ALIGNED, SPARSE_OBLIQUE

@@ -281,6 +295,13 @@ the gradient of the loss relative to the model output).
  optimizes the splits to minimize the variance of "gradient / hessian".
  Available for all losses except regression.

+#### [validation_ratio](../yggdrasil_decision_forests/learner/gradient_boosted_trees/gradient_boosted_trees.proto?q=symbol:validation_ratio)
+
+- **Type:** Real **Default:** 0.1 **Possible values:** min:0 max:1
+
+- Ratio of the training dataset used to monitor the training. Must be >0 if
+  early stopping is enabled.
+
## RANDOM_FOREST



@@ -300,8 +321,8 @@ It is probably the most well-known of the Decision Forest training algorithms.

### Training configuration

- learner/abstract_learner.proto
+- learner/decision_tree/decision_tree.proto
- learner/random_forest/random_forest.proto
-- model/decision_tree/decision_tree.proto

### Generic Hyper-parameters (compatible with TensorFlow Decision Forests)

@@ -315,21 +336,21 @@ It is probably the most well-known of the Decision Forest training algorithms.
  `maximum_training_duration`. Has no effect if there is no maximum training
  duration specified.

-#### [allow_na_conditions](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:allow_na_conditions)
+#### [allow_na_conditions](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:allow_na_conditions)

- **Type:** Categorical **Default:** false **Possible values:** true, false

- If true, the tree training evaluates conditions of the type `X is NA` i.e.
  `X is missing`.

-#### [categorical_algorithm](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:categorical_algorithm)
+#### [categorical_algorithm](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:categorical_algorithm)

- **Type:** Categorical **Default:** CART **Possible values:** CART, ONE_HOT,
  RANDOM

- How to learn splits on categorical attributes.
- `CART`: CART algorithm. Find categorical splits of the form "value \in mask". The solution is exact for binary classification, regression and ranking. It is approximated for multi-class classification. This is a good first algorithm to use. In case of overfitting (very small dataset, large dictionary), the "random" algorithm is a good alternative.
- `ONE_HOT`: One-hot encoding. Find the optimal categorical split of the form "attribute == param". This method is similar to (but more efficient than) converting each possible categorical value into a boolean feature. This method is available for comparison purposes and generally performs worse than other alternatives.
- `RANDOM`: Best split among a set of random candidates. Find a categorical split of the form "value \in mask" using a random search. This solution can be seen as an approximation of the CART algorithm. This method is a strong alternative to CART. This algorithm is inspired by section "5.1 Categorical Variables" of "Random Forest", 2001.

-#### [categorical_set_split_greedy_sampling](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:categorical_set_greedy_forward)
+#### [categorical_set_split_greedy_sampling](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:categorical_set_greedy_forward)

- **Type:** Real **Default:** 0.1 **Possible values:** min:0 max:1

@@ -337,7 +358,7 @@ It is probably the most well-known of the Decision Forest training algorithms.
  to be a candidate for the positive set. The sampling is applied once per node
  (i.e. not at every step of the greedy optimization).

-#### [categorical_set_split_max_num_items](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:max_num_items)
+#### [categorical_set_split_max_num_items](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:max_num_items)

- **Type:** Integer **Default:** -1 **Possible values:** min:-1

@@ -348,7 +369,7 @@ It is probably the most well-known of the Decision Forest training algorithms.
  `max_vocab_count`, all the remaining items are grouped in a special
  Out-of-vocabulary item. With `max_num_items`, this is not the case.

-#### [categorical_set_split_min_item_frequency](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:min_item_frequency)
+#### [categorical_set_split_min_item_frequency](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:min_item_frequency)

- **Type:** Integer **Default:** 1 **Possible values:** min:1

@@ -371,14 +392,14 @@ It is probably the most well-known of the Decision Forest training algorithms.
  summary and model inspector). Note that the OOB feature importance can be
  expensive to compute.

-#### [growing_strategy](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:growing_strategy)
+#### [growing_strategy](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:growing_strategy)

- **Type:** Categorical **Default:** LOCAL **Possible values:** LOCAL,
  BEST_FIRST_GLOBAL

- How to grow the tree.
- `LOCAL`: Each node is split independently of the other nodes. In other words, as long as a node satisfies the split constraints (e.g. maximum depth, minimum number of observations), the node will be split. This is the "classical" way to grow decision trees.
- `BEST_FIRST_GLOBAL`: The node with the best loss reduction among all the nodes of the tree is selected for splitting. This method is also called "best first" or "leaf-wise growth". See "Best-first decision tree learning", Shi and "Additive logistic regression: A statistical view of boosting", Friedman for more details.

-#### [in_split_min_examples_check](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:in_split_min_examples_check)
+#### [in_split_min_examples_check](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:in_split_min_examples_check)

- **Type:** Categorical **Default:** true **Possible values:** true, false

@@ -388,14 +409,14 @@ It is probably the most well-known of the Decision Forest training algorithms.
  only if it contains more than `min_examples` examples). If false, there can
  be nodes with fewer than `min_examples` training examples.

-#### [max_depth](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:max_depth)
+#### [max_depth](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:max_depth)

- **Type:** Integer **Default:** 16 **Possible values:** min:-1

- Maximum depth of the tree. `max_depth=1` means that all trees will be roots.
  Negative values are ignored.

-#### [max_num_nodes](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:max_num_nodes)
+#### [max_num_nodes](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:max_num_nodes)

- **Type:** Integer **Default:** 31 **Possible values:** min:-1

@@ -410,20 +431,20 @@ It is probably the most well-known of the Decision Forest training algorithms.
  algorithm is free to use this parameter as it sees fit. Enabling maximum
  training duration makes the model training non-deterministic.

-#### [min_examples](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:min_examples)
+#### [min_examples](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:min_examples)

- **Type:** Integer **Default:** 5 **Possible values:** min:1

- Minimum number of examples in a node.

-#### [missing_value_policy](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:missing_value_policy)
+#### [missing_value_policy](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:missing_value_policy)

- **Type:** Categorical **Default:** GLOBAL_IMPUTATION **Possible values:**
  GLOBAL_IMPUTATION, LOCAL_IMPUTATION, RANDOM_LOCAL_IMPUTATION

- Method used to handle missing attribute values.
- `GLOBAL_IMPUTATION`: Missing attribute values are imputed with the mean (in case of numerical attribute) or the most-frequent-item (in case of categorical attribute) computed on the entire dataset (i.e. the information contained in the data spec).
- `LOCAL_IMPUTATION`: Missing attribute values are imputed with the mean (in case of numerical attribute) or the most-frequent-item (in case of categorical attribute) evaluated on the training examples in the current node.
- `RANDOM_LOCAL_IMPUTATION`: Missing attribute values are imputed from values randomly sampled from the training examples in the current node. This method was proposed by Ishwaran et al. in "Random Survival Forests" (https://projecteuclid.org/download/pdfview_1/euclid.aoas/1223908043).

-#### [num_candidate_attributes](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:num_candidate_attributes)
+#### [num_candidate_attributes](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:num_candidate_attributes)

- **Type:** Integer **Default:** 0 **Possible values:** min:-1

@@ -434,7 +455,7 @@ It is probably the most well-known of the Decision Forest training algorithms.
  `number_of_input_attributes / 3` in case of regression. If
  `num_candidate_attributes=-1`, all the attributes are tested.

-#### [num_candidate_attributes_ratio](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:num_candidate_attributes_ratio)
+#### [num_candidate_attributes_ratio](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:num_candidate_attributes_ratio)

- **Type:** Real **Default:** -1 **Possible values:** min:-1 max:1

@@ -452,14 +473,14 @@ It is probably the most well-known of the Decision Forest training algorithms.
  increase the quality of the model at the expense of size, training speed,
  and inference latency.

-#### [sparse_oblique_normalization](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:sparse_oblique_split)
+#### [sparse_oblique_normalization](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:sparse_oblique_split)

- **Type:** Categorical **Default:** NONE **Possible values:** NONE,
  STANDARD_DEVIATION, MIN_MAX

- For sparse oblique splits i.e. `split_axis=SPARSE_OBLIQUE`. Normalization applied on the features, before applying the sparse oblique projections.
- `NONE`: No normalization.
- `STANDARD_DEVIATION`: Normalize the feature by the estimated standard deviation on the entire train dataset. Also known as Z-Score normalization.
- `MIN_MAX`: Normalize the feature by the range (i.e. max-min) estimated on the entire train dataset. -#### [sparse_oblique_num_projections_exponent](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:num_projections_exponent) +#### [sparse_oblique_num_projections_exponent](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:num_projections_exponent) - **Type:** Real **Default:** 2 **Possible values:** min:0 @@ -467,7 +488,7 @@ It is probably the most well-known of the Decision Forest training algorithms. number of random projections to test at each node as `num_features^num_projections_exponent`. -#### [sparse_oblique_projection_density_factor](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:projection_density_factor) +#### [sparse_oblique_projection_density_factor](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:projection_density_factor) - **Type:** Real **Default:** 2 **Possible values:** min:0 @@ -475,7 +496,7 @@ It is probably the most well-known of the Decision Forest training algorithms. number of random projections to test at each node as `num_features^num_projections_exponent`. -#### [split_axis](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:split_axis) +#### [split_axis](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:split_axis) - **Type:** Categorical **Default:** AXIS_ALIGNED **Possible values:** AXIS_ALIGNED, SPARSE_OBLIQUE @@ -505,25 +526,25 @@ used to grow the tree while the second is used to prune the tree. - learner/abstract_learner.proto - learner/cart/cart.proto -- model/decision_tree/decision_tree.proto +- learner/decision_tree/decision_tree.proto ### Generic Hyper-parameters (compatible with TensorFlow Decision Forests) -#### [allow_na_conditions](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:allow_na_conditions) +#### [allow_na_conditions](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:allow_na_conditions) - **Type:** Categorical **Default:** false **Possible values:** true, false - If true, the tree training evaluates conditions of the type `X is NA` i.e. `X is missing`. -#### [categorical_algorithm](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:categorical_algorithm) +#### [categorical_algorithm](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:categorical_algorithm) - **Type:** Categorical **Default:** CART **Possible values:** CART, ONE_HOT, RANDOM - How to learn splits on categorical attributes.
- `CART`: CART algorithm. Find categorical splits of the form "value \in mask". The solution is exact for binary classification, regression and ranking. It is approximated for multi-class classification. This is a good first algorithm to use. In case of overfitting (very small dataset, large dictionary), the "random" algorithm is a good alternative.
- `ONE_HOT`: One-hot encoding. Find the optimal categorical split of the form "attribute == param". This method is similar to (but more efficient than) converting each possible categorical value into a boolean feature. This method is available for comparison purposes and generally performs worse than other alternatives.
- `RANDOM`: Best split among a set of random candidates. Find a categorical split of the form "value \in mask" using a random search. This solution can be seen as an approximation of the CART algorithm. This method is a strong alternative to CART. This algorithm is inspired by section "5.1 Categorical Variables" of "Random Forest", 2001.

-#### [categorical_set_split_greedy_sampling](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:categorical_set_greedy_forward)
+#### [categorical_set_split_greedy_sampling](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:categorical_set_greedy_forward)

- **Type:** Real **Default:** 0.1 **Possible values:** min:0 max:1

@@ -531,7 +552,7 @@ used to grow the tree while the second is used to prune the tree.
  to be a candidate for the positive set. The sampling is applied once per node
  (i.e. not at every step of the greedy optimization).

-#### [categorical_set_split_max_num_items](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:max_num_items)
+#### [categorical_set_split_max_num_items](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:max_num_items)

- **Type:** Integer **Default:** -1 **Possible values:** min:-1

@@ -542,21 +563,21 @@ used to grow the tree while the second is used to prune the tree.
  `max_vocab_count`, all the remaining items are grouped in a special
  Out-of-vocabulary item. With `max_num_items`, this is not the case.

-#### [categorical_set_split_min_item_frequency](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:min_item_frequency)
+#### [categorical_set_split_min_item_frequency](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:min_item_frequency)

- **Type:** Integer **Default:** 1 **Possible values:** min:1

- For categorical set splits e.g. texts. Minimum number of occurrences of an
  item to be considered.

-#### [growing_strategy](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:growing_strategy)
+#### [growing_strategy](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:growing_strategy)

- **Type:** Categorical **Default:** LOCAL **Possible values:** LOCAL,
  BEST_FIRST_GLOBAL

- How to grow the tree.
- `LOCAL`: Each node is split independently of the other nodes. In other words, as long as a node satisfies the split constraints (e.g. maximum depth, minimum number of observations), the node will be split. This is the "classical" way to grow decision trees.
- `BEST_FIRST_GLOBAL`: The node with the best loss reduction among all the nodes of the tree is selected for splitting. This method is also called "best first" or "leaf-wise growth". See "Best-first decision tree learning", Shi and "Additive logistic regression: A statistical view of boosting", Friedman for more details.

-#### [in_split_min_examples_check](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:in_split_min_examples_check)
+#### [in_split_min_examples_check](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:in_split_min_examples_check)

- **Type:** Categorical **Default:** true **Possible values:** true, false

@@ -566,14 +587,14 @@ used to grow the tree while the second is used to prune the tree.
  only if it contains more than `min_examples` examples). If false, there can
  be nodes with fewer than `min_examples` training examples.

-#### [max_depth](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:max_depth)
+#### [max_depth](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:max_depth)

- **Type:** Integer **Default:** 16 **Possible values:** min:-1

- Maximum depth of the tree. `max_depth=1` means that all trees will be roots.
  Negative values are ignored.

-#### [max_num_nodes](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:max_num_nodes)
+#### [max_num_nodes](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:max_num_nodes)

- **Type:** Integer **Default:** 31 **Possible values:** min:-1

@@ -588,20 +609,20 @@ used to grow the tree while the second is used to prune the tree.
  algorithm is free to use this parameter as it sees fit. Enabling maximum
  training duration makes the model training non-deterministic.

-#### [min_examples](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:min_examples)
+#### [min_examples](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:min_examples)

- **Type:** Integer **Default:** 5 **Possible values:** min:1

- Minimum number of examples in a node.

-#### [missing_value_policy](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:missing_value_policy)
+#### [missing_value_policy](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:missing_value_policy)

- **Type:** Categorical **Default:** GLOBAL_IMPUTATION **Possible values:**
  GLOBAL_IMPUTATION, LOCAL_IMPUTATION, RANDOM_LOCAL_IMPUTATION

- Method used to handle missing attribute values.
- `GLOBAL_IMPUTATION`: Missing attribute values are imputed with the mean (in case of numerical attribute) or the most-frequent-item (in case of categorical attribute) computed on the entire dataset (i.e. the information contained in the data spec).
- `LOCAL_IMPUTATION`: Missing attribute values are imputed with the mean (in case of numerical attribute) or the most-frequent-item (in case of categorical attribute) evaluated on the training examples in the current node.
- `RANDOM_LOCAL_IMPUTATION`: Missing attribute values are imputed from values randomly sampled from the training examples in the current node. This method was proposed by Ishwaran et al. in "Random Survival Forests" (https://projecteuclid.org/download/pdfview_1/euclid.aoas/1223908043).

-#### [num_candidate_attributes](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:num_candidate_attributes)
+#### [num_candidate_attributes](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:num_candidate_attributes)

- **Type:** Integer **Default:** 0 **Possible values:** min:-1

@@ -612,7 +633,7 @@ used to grow the tree while the second is used to prune the tree.
  `number_of_input_attributes / 3` in case of regression. If
  `num_candidate_attributes=-1`, all the attributes are tested.

-#### [num_candidate_attributes_ratio](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:num_candidate_attributes_ratio)
+#### [num_candidate_attributes_ratio](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:num_candidate_attributes_ratio)

- **Type:** Real **Default:** -1 **Possible values:** min:-1 max:1

@@ -622,14 +643,14 @@ used to grow the tree while the second is used to prune the tree.
  as well as -1. If not set or equal to -1, the `num_candidate_attributes` is
  used.

-#### [sparse_oblique_normalization](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:sparse_oblique_split)
+#### [sparse_oblique_normalization](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:sparse_oblique_split)

- **Type:** Categorical **Default:** NONE **Possible values:** NONE,
  STANDARD_DEVIATION, MIN_MAX

- For sparse oblique splits i.e. `split_axis=SPARSE_OBLIQUE`. Normalization applied on the features, before applying the sparse oblique projections.
- `NONE`: No normalization.
- `STANDARD_DEVIATION`: Normalize the feature by the estimated standard deviation on the entire train dataset. Also known as Z-Score normalization.
- `MIN_MAX`: Normalize the feature by the range (i.e. max-min) estimated on the entire train dataset. -#### [sparse_oblique_num_projections_exponent](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:num_projections_exponent) +#### [sparse_oblique_num_projections_exponent](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:num_projections_exponent) - **Type:** Real **Default:** 2 **Possible values:** min:0 @@ -637,7 +658,7 @@ used to grow the tree while the second is used to prune the tree. number of random projections to test at each node as `num_features^num_projections_exponent`. -#### [sparse_oblique_projection_density_factor](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:projection_density_factor) +#### [sparse_oblique_projection_density_factor](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:projection_density_factor) - **Type:** Real **Default:** 2 **Possible values:** min:0 @@ -645,7 +666,7 @@ used to grow the tree while the second is used to prune the tree. number of random projections to test at each node as `num_features^num_projections_exponent`. -#### [split_axis](../yggdrasil_decision_forests/model/decision_tree/decision_tree.proto?q=symbol:split_axis) +#### [split_axis](../yggdrasil_decision_forests/learner/decision_tree/decision_tree.proto?q=symbol:split_axis) - **Type:** Categorical **Default:** AXIS_ALIGNED **Possible values:** AXIS_ALIGNED, SPARSE_OBLIQUE diff --git a/documentation/user_manual.md b/documentation/user_manual.md index 300dfc98..787295db 100644 --- a/documentation/user_manual.md +++ b/documentation/user_manual.md @@ -32,7 +32,7 @@ It is complementary to the beginner example available in `examples/`. * [Fast engine](#fast-engine) * [Advanced features](#advanced-features) - + diff --git a/examples/BUILD b/examples/BUILD index 42e965c2..b5757fb1 100644 --- a/examples/BUILD +++ b/examples/BUILD @@ -1,8 +1,17 @@ package( - default_visibility = ["//third_party/tensorflow_decision_forests:users"], + default_visibility = ["//visibility:public"], licenses = ["notice"], ) +# Usage example of Yggdrasil Decision Forests as a library. +# +# Compile and run the example with: +# bazel build //examples:beginner_cc --config=linux_cpp17 --config=linux_avx2 +# bazel-bin/examples/beginner_cc --alsologtostderr +# +# See the "Using the C++ library" section in the user manual for more details about the API. +# See the "Compile command-line-interface from source" section in the user manual for more details about the compilation flags. 
+#
cc_binary(
name = "beginner_cc",
srcs = ["beginner.cc"],
@@ -14,12 +23,12 @@ cc_binary(
# See TensorFlow Decision Forests for an example (TF-DF imports YDF):
# https://github.com/tensorflow/decision-forests
deps = [
-        "//learning/lib/ami/simple_ml/dataset:vertical_dataset_io",
"@com_google_absl//absl/flags:flag",
"//yggdrasil_decision_forests/dataset:all_dataset_formats",
"//yggdrasil_decision_forests/dataset:data_spec",
"//yggdrasil_decision_forests/dataset:data_spec_cc_proto",
"//yggdrasil_decision_forests/dataset:data_spec_inference",
+        "//yggdrasil_decision_forests/dataset:vertical_dataset_io",
"//yggdrasil_decision_forests/learner:all_learners",
"//yggdrasil_decision_forests/learner:learner_library",
"//yggdrasil_decision_forests/metric",
diff --git a/examples/beginner.cc b/examples/beginner.cc
index 611dc001..3548de22 100644
--- a/examples/beginner.cc
+++ b/examples/beginner.cc
@@ -42,11 +42,11 @@
// --alsologtostderr
//

-#include "learning/lib/ami/simple_ml/dataset/vertical_dataset_io.h"
#include "absl/flags/flag.h"
#include "yggdrasil_decision_forests/dataset/data_spec.h"
#include "yggdrasil_decision_forests/dataset/data_spec.pb.h"
#include "yggdrasil_decision_forests/dataset/data_spec_inference.h"
+#include "yggdrasil_decision_forests/dataset/vertical_dataset_io.h"
#include "yggdrasil_decision_forests/learner/learner_library.h"
#include "yggdrasil_decision_forests/metric/metric.h"
#include "yggdrasil_decision_forests/metric/report.h"
diff --git a/third_party/protobuf/workspace.bzl b/third_party/protobuf/workspace.bzl
index aee785e4..2c555b07 100644
--- a/third_party/protobuf/workspace.bzl
+++ b/third_party/protobuf/workspace.bzl
@@ -1,16 +1,13 @@
"""Protobuf project."""

-# We use the protobuf linked in tensorflow.
-#load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

def deps():
-    # We use the protobuf linked in tensorflow.
-    #http_archive(
-    #    name = "com_google_protobuf",
-    #    #strip_prefix = "protobuf-master",
-    #    #urls = ["https://github.com/protocolbuffers/protobuf/archive/master.zip"],
-    #    urls = [" https://github.com/protocolbuffers/protobuf/archive/v3.14.0.zip"],
-    #    strip_prefix = "protobuf-3.14.0",
-    #    sha256 = "bf0e5070b4b99240183b29df78155eee335885e53a8af8683964579c214ad301",
-    #)
-    pass
+    http_archive(
+        name = "com_google_protobuf",
+        #strip_prefix = "protobuf-master",
+        #urls = ["https://github.com/protocolbuffers/protobuf/archive/master.zip"],
+        urls = ["https://github.com/protocolbuffers/protobuf/archive/v3.14.0.zip"],
+        strip_prefix = "protobuf-3.14.0",
+        sha256 = "bf0e5070b4b99240183b29df78155eee335885e53a8af8683964579c214ad301",
+    )
diff --git a/tools/test_bazel.bat b/tools/test_bazel.bat
index ed40ab94..72d379b6 100644
--- a/tools/test_bazel.bat
+++ b/tools/test_bazel.bat
@@ -14,7 +14,7 @@
:: Compiles and runs the unit tests.
-set BAZEL=bazel-3.7.2-windows-x86_64.exe
+set BAZEL=bazel-4.0.0-windows-x86_64.exe

set FLAGS_WO_TF=--config=windows_cpp17
set FLAGS_W_TF=--config=windows_cpp14 --config=use_tensorflow_io
diff --git a/tools/test_bazel.sh b/tools/test_bazel.sh
index 13209eeb..72944efb 100755
--- a/tools/test_bazel.sh
+++ b/tools/test_bazel.sh
@@ -19,11 +19,7 @@
set -x
set -e

-# A specific version of GCC or Clang can be set as follow:
-# export CC=gcc-9
-# export CXX=g++-9
-
-BAZEL=bazel-3.7.2
+BAZEL=bazel
FLAGS="--config=linux_cpp17 --config=linux_avx2"

${BAZEL} build //yggdrasil_decision_forests/cli/...:all ${FLAGS}
@@ -36,6 +32,7 @@
for dir in "${subdirs[@]}"
do
targets="$targets //yggdrasil_decision_forests/${dir}/...:all"
done
+targets="$targets //examples:beginner_cc"

# Note: use_tensorflow_io is required for some of the unit tests.
${BAZEL} test ${targets} ${FLAGS} --config=use_tensorflow_io
diff --git a/yggdrasil_decision_forests/cli/benchmark_inference.cc b/yggdrasil_decision_forests/cli/benchmark_inference.cc
index 12989f53..ed7ff9e0 100644
--- a/yggdrasil_decision_forests/cli/benchmark_inference.cc
+++ b/yggdrasil_decision_forests/cli/benchmark_inference.cc
@@ -104,7 +104,7 @@ std::string ResultsToString(const RunOptions& options,
  absl::StrAppendFormat(&report, "batch_size : %d  num_runs : %d\n",
                        options.batch_size, options.num_runs);
-  absl::StrAppendFormat(&report, "time/example(µs)  time/batch(µs)  method\n");
+  absl::StrAppendFormat(&report, "time/example(us)  time/batch(us)  method\n");
  absl::StrAppendFormat(&report, "----------------------------------------\n");
  for (const auto& result : results) {
    absl::StrAppendFormat(
diff --git a/yggdrasil_decision_forests/learner/gradient_boosted_trees/gradient_boosted_trees.cc b/yggdrasil_decision_forests/learner/gradient_boosted_trees/gradient_boosted_trees.cc
index ee6a1ed5..7e07c62b 100644
--- a/yggdrasil_decision_forests/learner/gradient_boosted_trees/gradient_boosted_trees.cc
+++ b/yggdrasil_decision_forests/learner/gradient_boosted_trees/gradient_boosted_trees.cc
@@ -98,6 +98,15 @@ constexpr char GradientBoostedTreesLearner::kHParamForestExtraction[];
constexpr char GradientBoostedTreesLearner::kHParamForestExtractionMart[];
constexpr char GradientBoostedTreesLearner::kHParamForestExtractionDart[];

+constexpr char GradientBoostedTreesLearner::kHParamValidationSetRatio[];
+constexpr char GradientBoostedTreesLearner::kHParamEarlyStopping[];
+constexpr char GradientBoostedTreesLearner::kHParamEarlyStoppingNone[];
+constexpr char
+    GradientBoostedTreesLearner::kHParamEarlyStoppingMinLossFullModel[];
+constexpr char GradientBoostedTreesLearner::kHParamEarlyStoppingLossIncrease[];
+constexpr char
+    GradientBoostedTreesLearner::kHParamEarlyStoppingNumTreesLookAhead[];
+
using dataset::VerticalDataset;
using CategoricalColumn = VerticalDataset::CategoricalColumn;

@@ -1554,6 +1563,41 @@ absl::Status GradientBoostedTreesLearner::SetHyperParametersImpl(
    }
  }

+  {
+    const auto hparam = generic_hyper_params->Get(kHParamValidationSetRatio);
+    if (hparam.has_value()) {
+      gbt_config->set_validation_set_ratio(hparam.value().value().real());
+    }
+  }
+
+  {
+    const auto hparam =
+        generic_hyper_params->Get(kHParamEarlyStoppingNumTreesLookAhead);
+    if (hparam.has_value()) {
+      gbt_config->set_early_stopping_num_trees_look_ahead(
+          hparam.value().value().integer());
+    }
+  }
+
+  {
+    const auto hparam = generic_hyper_params->Get(kHParamEarlyStopping);
+    if (hparam.has_value()) {
+      const auto early_stopping = hparam.value().value().categorical();
+      if (early_stopping ==
+          kHParamEarlyStoppingNone) {
+        gbt_config->set_early_stopping(
+            proto::GradientBoostedTreesTrainingConfig::NONE);
+      } else if (early_stopping == kHParamEarlyStoppingMinLossFullModel) {
+        gbt_config->set_early_stopping(
+            proto::GradientBoostedTreesTrainingConfig::
+                MIN_VALIDATION_LOSS_ON_FULL_MODEL);
+      } else if (early_stopping == kHParamEarlyStoppingLossIncrease) {
+        gbt_config->set_early_stopping(
+            proto::GradientBoostedTreesTrainingConfig::
+                VALIDATION_LOSS_INCREASE);
+      }
+    }
+  }
+
  return absl::OkStatus();
}

@@ -1917,6 +1961,45 @@ GradientBoostedTreesLearner::GetGenericHyperParameterSpecification() const {
        R"(Ratio of the dataset used to train individual tree for the selective Gradient Boosting (Selective Gradient Boosting for Effective Learning to Rank; Lucchese et al; http://quickrank.isti.cnr.it/selective-data/selective-SIGIR2018.pdf) sampling method.)");
  }

+  {
+    auto& param =
+        hparam_def.mutable_fields()->operator[](kHParamValidationSetRatio);
+    param.mutable_real()->set_minimum(0.f);
+    param.mutable_real()->set_maximum(1.f);
+    param.mutable_real()->set_default_value(gbt_config.validation_set_ratio());
+    param.mutable_documentation()->set_proto_path(proto_path);
+    param.mutable_documentation()->set_description(
+        R"(Ratio of the training dataset used to monitor the training. Must be >0 if early stopping is enabled.)");
+  }
+
+  {
+    auto& param = hparam_def.mutable_fields()->operator[](
+        kHParamEarlyStoppingNumTreesLookAhead);
+    param.mutable_integer()->set_minimum(1);
+    param.mutable_integer()->set_default_value(
+        gbt_config.early_stopping_num_trees_look_ahead());
+    param.mutable_documentation()->set_proto_path(proto_path);
+    param.mutable_documentation()->set_description(
+        R"(Rolling number of trees used to detect validation loss increase and trigger early stopping.)");
+  }
+
+  {
+    auto& param = hparam_def.mutable_fields()->operator[](kHParamEarlyStopping);
+    param.mutable_categorical()->set_default_value(
+        kHParamEarlyStoppingLossIncrease);
+    param.mutable_categorical()->add_possible_values(kHParamEarlyStoppingNone);
+    param.mutable_categorical()->add_possible_values(
+        kHParamEarlyStoppingMinLossFullModel);
+    param.mutable_categorical()->add_possible_values(
+        kHParamEarlyStoppingLossIncrease);
+    param.mutable_documentation()->set_proto_path(proto_path);
+    param.mutable_documentation()->set_description(
+        R"(Early stopping detects the overfitting of the model and halts its training using the validation dataset controlled by `validation_ratio`.
+- `NONE`: No early stopping. The model is trained entirely.
+- `MIN_LOSS_FINAL`: No early stopping. However, the model is then truncated to minimize the validation loss.
+- `LOSS_INCREASE`: Stop the training when the validation loss does not decrease for `early_stopping_num_trees_look_ahead` trees.)");
+  }
+
  RETURN_IF_ERROR(decision_tree::GetGenericHyperParameterSpecification(
      gbt_config.decision_tree(), &hparam_def));
  return hparam_def;
}
diff --git a/yggdrasil_decision_forests/learner/gradient_boosted_trees/gradient_boosted_trees.h b/yggdrasil_decision_forests/learner/gradient_boosted_trees/gradient_boosted_trees.h
index 60e81fff..d39bf9c2 100644
--- a/yggdrasil_decision_forests/learner/gradient_boosted_trees/gradient_boosted_trees.h
+++ b/yggdrasil_decision_forests/learner/gradient_boosted_trees/gradient_boosted_trees.h
@@ -108,6 +108,15 @@ class GradientBoostedTreesLearner : public AbstractLearner {
  static constexpr char kHParamForestExtractionMart[] = "MART";
  static constexpr char kHParamForestExtractionDart[] = "DART";

+  static constexpr char kHParamValidationSetRatio[] = "validation_ratio";
+  static constexpr char kHParamEarlyStopping[] = "early_stopping";
+  static constexpr char kHParamEarlyStoppingNone[] = "NONE";
+  static constexpr char kHParamEarlyStoppingMinLossFullModel[] =
+      "MIN_LOSS_FINAL";
+  static constexpr char kHParamEarlyStoppingLossIncrease[] = "LOSS_INCREASE";
+  static constexpr char kHParamEarlyStoppingNumTreesLookAhead[] =
+      "early_stopping_num_trees_look_ahead";
+
  utils::StatusOr<std::unique_ptr<AbstractModel>> TrainWithStatus(
      const dataset::VerticalDataset& train_dataset) const override;
diff --git a/yggdrasil_decision_forests/learner/gradient_boosted_trees/gradient_boosted_trees_test.cc b/yggdrasil_decision_forests/learner/gradient_boosted_trees/gradient_boosted_trees_test.cc
index 3e9d2aa9..72726bde 100644
--- a/yggdrasil_decision_forests/learner/gradient_boosted_trees/gradient_boosted_trees_test.cc
+++ b/yggdrasil_decision_forests/learner/gradient_boosted_trees/gradient_boosted_trees_test.cc
@@ -1336,6 +1336,21 @@ TEST(GradientBoostedTrees, SetHyperParameters) {
  GradientBoostedTreesLearner learner{model::proto::TrainingConfig()};
  const auto hparam_spec =
      learner.GetGenericHyperParameterSpecification().value();
+  const auto& gbdt_config =
+      *learner.mutable_training_config()->MutableExtension(
+          gradient_boosted_trees::proto::gradient_boosted_trees_config);
+  const float epsilon = 0.0001f;
+
+  // Defaults
+  EXPECT_EQ(gbdt_config.num_trees(), 300);
+  EXPECT_FALSE(
+      learner.training_config().has_maximum_training_duration_seconds());
+  EXPECT_NEAR(gbdt_config.validation_set_ratio(), 0.1f, epsilon);
+  EXPECT_EQ(gbdt_config.early_stopping_num_trees_look_ahead(), 30);
+  EXPECT_EQ(
+      gbdt_config.early_stopping(),
+      proto::GradientBoostedTreesTrainingConfig::VALIDATION_LOSS_INCREASE);
+
  EXPECT_OK(learner.SetHyperParameters(PARSE_TEST_PROTO(R"pb(
    fields {
      name: "num_trees"
      value { integer: 10 }
    }
    fields {
      name: "maximum_training_duration_seconds"
      value { real: 10 }
-    })pb")));
-  const auto& gbdt_config = learner.training_config().GetExtension(
-      gradient_boosted_trees::proto::gradient_boosted_trees_config);
+    }
+    fields {
+      name: "validation_ratio"
+      value { real: 0.2 }
+    }
+    fields {
+      name: "early_stopping_num_trees_look_ahead"
+      value { integer: 10 }
+    }
+    fields {
+      name: "early_stopping"
+      value { categorical: "NONE" }
+    }
+  )pb")));
+
+  // Set
  EXPECT_EQ(gbdt_config.num_trees(), 10);
  EXPECT_NEAR(learner.training_config().maximum_training_duration_seconds(),
              10., 0.001);
+  EXPECT_NEAR(gbdt_config.validation_set_ratio(), 0.2, epsilon);
+  EXPECT_EQ(gbdt_config.early_stopping_num_trees_look_ahead(), 10);
+
+  EXPECT_EQ(gbdt_config.early_stopping(),
+            proto::GradientBoostedTreesTrainingConfig::NONE);
}

TEST(DartPredictionAccumulator, Base) {
diff --git a/yggdrasil_decision_forests/utils/filesystem_default.cc b/yggdrasil_decision_forests/utils/filesystem_default.cc
index 24312303..ba36e446 100644
--- a/yggdrasil_decision_forests/utils/filesystem_default.cc
+++ b/yggdrasil_decision_forests/utils/filesystem_default.cc
@@ -90,7 +90,7 @@ absl::Status RecursivelyCreateDir(absl::string_view path, int options) {
}

absl::Status FileInputByteStream::Open(absl::string_view path) {
-  file_ = std::fopen(std::string(path).c_str(), "r");
+  file_ = std::fopen(std::string(path).c_str(), "rb");
  if (!file_) {
    return absl::Status(absl::StatusCode::kUnknown,
                        absl::StrCat("Failed to open ", path));
@@ -123,7 +123,7 @@ absl::Status FileInputByteStream::Close() {
}

absl::Status FileOutputByteStream::Open(absl::string_view path) {
-  file_ = std::fopen(std::string(path).c_str(), "w");
+  file_ = std::fopen(std::string(path).c_str(), "wb");
  if (!file_) {
    return absl::Status(absl::StatusCode::kUnknown,
                        absl::StrCat("Failed to open ", path));
diff --git a/yggdrasil_decision_forests/utils/logging_default.h b/yggdrasil_decision_forests/utils/logging_default.h
index 5d0c3372..138394d8 100644
--- a/yggdrasil_decision_forests/utils/logging_default.h
+++ b/yggdrasil_decision_forests/utils/logging_default.h
@@ -133,13 +133,23 @@ enum Severity { INFO, WARNING, FATAL };

namespace internal {

+// Extracts the filename from a path.
+inline absl::string_view ExtractFilename(absl::string_view path) {
+  auto last_sep = path.find_last_of("/\\");
+  if (last_sep == std::string::npos) {
+    // No separator found: the path is already a bare filename.
+    return path;
+  }
+  return path.substr(last_sep + 1);
+}
+
class LogMessage {
public:
  LogMessage(Severity sev, absl::string_view file, int line) : sev_(sev) {
    if (!absl::GetFlag(FLAGS_alsologtostderr)) {
      return;
    }
-    std::clog << " [";
+    std::clog << "[";
    switch (sev) {
      case INFO:
        std::clog << "INFO";
        break;
@@ -154,7 +164,7 @@ class LogMessage {
        std::clog << "UNDEF";
        break;
    }
-    std::clog << " " << file << ":" << line << "] ";
+    std::clog << " " << ExtractFilename(file) << ":" << line << "] ";
  }

  virtual ~LogMessage() {
@@ -185,7 +195,6 @@ class FatalLogMessage : public LogMessage {
  [[noreturn]] ~FatalLogMessage() {
    if (absl::GetFlag(FLAGS_alsologtostderr)) {
      std::clog << std::endl;
-      std::clog << "============================" << std::endl;
      std::clog.flush();
    }
    std::exit(1);
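
The `"rb"`/`"wb"` changes in `filesystem_default.cc` matter on Windows, where text-mode streams translate line endings and treat Ctrl-Z (0x1A) as end-of-file, corrupting binary payloads such as serialized models. A minimal standalone sketch of the failure mode avoided (illustrative only, not part of the change):

```cpp
#include <cstdio>
#include <cstring>

// Round-trips a payload containing '\n' and 0x1A (Ctrl-Z). In text mode on
// Windows, fwrite would expand '\n' to "\r\n" and fread would stop at 0x1A,
// corrupting binary data. Binary mode ("wb"/"rb") keeps the bytes unchanged.
int main() {
  const unsigned char payload[] = {'\n', 0x1A, 0x42};

  std::FILE* out = std::fopen("payload.bin", "wb");  // Binary write.
  if (!out) return 1;
  std::fwrite(payload, 1, sizeof(payload), out);
  std::fclose(out);

  unsigned char back[sizeof(payload)] = {0, 0, 0};
  std::FILE* in = std::fopen("payload.bin", "rb");  // Binary read.
  if (!in) return 1;
  std::fread(back, 1, sizeof(back), in);
  std::fclose(in);

  // Returns 0 if the bytes survived the round-trip unchanged.
  return std::memcmp(payload, back, sizeof(payload)) == 0 ? 0 : 1;
}
```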