From d6c532135e7f066f9ccb40d157dfe6a44a9c7db5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Thu, 1 Aug 2024 12:37:07 +0200 Subject: [PATCH 01/36] [DOCS] Adds adaptive_allocations to inference and trained model API docs (#111476) --- .../inference/service-elasticsearch.asciidoc | 48 ++++++++++- .../inference/service-elser.asciidoc | 47 +++++++++- docs/reference/ml/ml-shared.asciidoc | 24 ++++++ .../start-trained-model-deployment.asciidoc | 86 +++++++++++++++---- .../update-trained-model-deployment.asciidoc | 43 +++++++++- 5 files changed, 225 insertions(+), 23 deletions(-) diff --git a/docs/reference/inference/service-elasticsearch.asciidoc b/docs/reference/inference/service-elasticsearch.asciidoc index 6fb0b4a38d0ef..99fd41ee2db65 100644 --- a/docs/reference/inference/service-elasticsearch.asciidoc +++ b/docs/reference/inference/service-elasticsearch.asciidoc @@ -51,6 +51,22 @@ include::inference-shared.asciidoc[tag=service-settings] These settings are specific to the `elasticsearch` service. -- +`adaptive_allocations`::: +(Optional, object) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation] + +`enabled`:::: +(Optional, Boolean) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation-enabled] + +`max_number_of_allocations`:::: +(Optional, integer) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation-max-number] + +`min_number_of_allocations`:::: +(Optional, integer) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation-min-number] + `model_id`::: (Required, string) The name of the model to use for the {infer} task. @@ -59,7 +75,9 @@ It can be the ID of either a built-in model (for example, `.multilingual-e5-smal `num_allocations`::: (Required, integer) -The total number of allocations this model is assigned across machine learning nodes. Increasing this value generally increases the throughput. +The total number of allocations this model is assigned across machine learning nodes. +Increasing this value generally increases the throughput. +If `adaptive_allocations` is enabled, do not set this value, because it's automatically set. `num_threads`::: (Required, integer) @@ -137,3 +155,31 @@ PUT _inference/text_embedding/my-msmarco-minilm-model <1> <1> Provide an unique identifier for the inference endpoint. The `inference_id` must be unique and must not match the `model_id`. <2> The `model_id` must be the ID of a text embedding model which has already been {ml-docs}/ml-nlp-import-model.html#ml-nlp-import-script[uploaded through Eland]. + +[discrete] +[[inference-example-adaptive-allocation]] +==== Setting adaptive allocation for E5 via the `elasticsearch` service + +The following example shows how to create an {infer} endpoint called +`my-e5-model` to perform a `text_embedding` task type and configure adaptive +allocations. + +The API request below will automatically download the E5 model if it isn't +already downloaded and then deploy the model. 
+ +[source,console] +------------------------------------------------------------ +PUT _inference/text_embedding/my-e5-model +{ + "service": "elasticsearch", + "service_settings": { + "adaptive_allocations": { + "enabled": true, + "min_number_of_allocations": 3, + "max_number_of_allocations": 10 + }, + "model_id": ".multilingual-e5-small" + } +} +------------------------------------------------------------ +// TEST[skip:TBD] \ No newline at end of file diff --git a/docs/reference/inference/service-elser.asciidoc b/docs/reference/inference/service-elser.asciidoc index 34c0f7d0a9c53..fdce94901984b 100644 --- a/docs/reference/inference/service-elser.asciidoc +++ b/docs/reference/inference/service-elser.asciidoc @@ -48,9 +48,27 @@ include::inference-shared.asciidoc[tag=service-settings] These settings are specific to the `elser` service. -- +`adaptive_allocations`::: +(Optional, object) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation] + +`enabled`:::: +(Optional, Boolean) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation-enabled] + +`max_number_of_allocations`:::: +(Optional, integer) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation-max-number] + +`min_number_of_allocations`:::: +(Optional, integer) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation-min-number] + `num_allocations`::: (Required, integer) -The total number of allocations this model is assigned across machine learning nodes. Increasing this value generally increases the throughput. +The total number of allocations this model is assigned across machine learning nodes. +Increasing this value generally increases the throughput. +If `adaptive_allocations` is enabled, do not set this value, because it's automatically set. `num_threads`::: (Required, integer) @@ -107,3 +125,30 @@ This error usually just reflects a timeout, while the model downloads in the bac You can check the download progress in the {ml-app} UI. If using the Python client, you can set the `timeout` parameter to a higher value. ==== + +[discrete] +[[inference-example-elser-adaptive-allocation]] +==== Setting adaptive allocation for the ELSER service + +The following example shows how to create an {infer} endpoint called +`my-elser-model` to perform a `sparse_embedding` task type and configure +adaptive allocations. + +The request below will automatically download the ELSER model if it isn't +already downloaded and then deploy the model. + +[source,console] +------------------------------------------------------------ +PUT _inference/sparse_embedding/my-elser-model +{ + "service": "elser", + "service_settings": { + "adaptive_allocations": { + "enabled": true, + "min_number_of_allocations": 3, + "max_number_of_allocations": 10 + } + } +} +------------------------------------------------------------ +// TEST[skip:TBD] \ No newline at end of file diff --git a/docs/reference/ml/ml-shared.asciidoc b/docs/reference/ml/ml-shared.asciidoc index a69fd2f1812e9..15a994115c88c 100644 --- a/docs/reference/ml/ml-shared.asciidoc +++ b/docs/reference/ml/ml-shared.asciidoc @@ -1,3 +1,27 @@ +tag::adaptive-allocation[] +Adaptive allocations configuration object. +If enabled, the number of allocations of the model is set based on the current load the process gets. +When the load is high, a new model allocation is automatically created (respecting the value of `max_number_of_allocations` if it's set). 
+When the load is low, a model allocation is automatically removed (respecting the value of `min_number_of_allocations` if it's set). +The number of model allocations cannot be scaled down to less than `1` this way. +If `adaptive_allocations` is enabled, do not set the number of allocations manually. +end::adaptive-allocation[] + +tag::adaptive-allocation-enabled[] +If `true`, `adaptive_allocations` is enabled. +Defaults to `false`. +end::adaptive-allocation-enabled[] + +tag::adaptive-allocation-max-number[] +Specifies the maximum number of allocations to scale to. +If set, it must be greater than or equal to `min_number_of_allocations`. +end::adaptive-allocation-max-number[] + +tag::adaptive-allocation-min-number[] +Specifies the minimum number of allocations to scale to. +If set, it must be greater than or equal to `1`. +end::adaptive-allocation-min-number[] + tag::aggregations[] If set, the {dfeed} performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. For more information, diff --git a/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc b/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc index f1b3fffb8a9a2..6f7e2a4d9f988 100644 --- a/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc +++ b/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc @@ -30,7 +30,10 @@ must be unique and should not match any other deployment ID or model ID, unless it is the same as the ID of the model being deployed. If `deployment_id` is not set, it defaults to the `model_id`. -Scaling inference performance can be achieved by setting the parameters +You can enable adaptive allocations to automatically scale model allocations up +and down based on the actual resource requirement of the processes. + +Manually scaling inference performance can be achieved by setting the parameters `number_of_allocations` and `threads_per_allocation`. Increasing `threads_per_allocation` means more threads are used when an @@ -58,6 +61,46 @@ include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=model-id] [[start-trained-model-deployment-query-params]] == {api-query-parms-title} +`deployment_id`:: +(Optional, string) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=deployment-id] ++ +-- +Defaults to `model_id`. +-- + +`timeout`:: +(Optional, time) +Controls the amount of time to wait for the model to deploy. Defaults to 30 +seconds. + +`wait_for`:: +(Optional, string) +Specifies the allocation status to wait for before returning. Defaults to +`started`. The value `starting` indicates deployment is starting but not yet on +any node. The value `started` indicates the model has started on at least one +node. The value `fully_allocated` indicates the deployment has started on all +valid nodes. 
+ +[[start-trained-model-deployment-request-body]] +== {api-request-body-title} + +`adaptive_allocations`:: +(Optional, object) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation] + +`enabled`::: +(Optional, Boolean) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation-enabled] + +`max_number_of_allocations`::: +(Optional, integer) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation-max-number] + +`min_number_of_allocations`::: +(Optional, integer) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation-min-number] + `cache_size`:: (Optional, <>) The inference cache size (in memory outside the JVM heap) per node for the @@ -65,15 +108,11 @@ model. In serverless, the cache is disabled by default. Otherwise, the default v `model_size_bytes` field in the <>. To disable the cache, `0b` can be provided. -`deployment_id`:: -(Optional, string) -include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=deployment-id] -Defaults to `model_id`. - `number_of_allocations`:: (Optional, integer) The total number of allocations this model is assigned across {ml} nodes. -Increasing this value generally increases the throughput. Defaults to 1. +Increasing this value generally increases the throughput. Defaults to `1`. +If `adaptive_allocations` is enabled, do not set this value, because it's automatically set. `priority`:: (Optional, string) @@ -110,18 +149,6 @@ compute-bound process; `threads_per_allocations` must not exceed the number of available allocated processors per node. Defaults to 1. Must be a power of 2. Max allowed value is 32. -`timeout`:: -(Optional, time) -Controls the amount of time to wait for the model to deploy. Defaults to 30 -seconds. - -`wait_for`:: -(Optional, string) -Specifies the allocation status to wait for before returning. Defaults to -`started`. The value `starting` indicates deployment is starting but not yet on -any node. The value `started` indicates the model has started on at least one -node. The value `fully_allocated` indicates the deployment has started on all -valid nodes. [[start-trained-model-deployment-example]] == {api-examples-title} @@ -182,3 +209,24 @@ The `my_model` trained model can be deployed again with a different ID: POST _ml/trained_models/my_model/deployment/_start?deployment_id=my_model_for_search -------------------------------------------------- // TEST[skip:TBD] + + +[[start-trained-model-deployment-adaptive-allocation-example]] +=== Setting adaptive allocations + +The following example starts a new deployment of the `my_model` trained model +with the ID `my_model_for_search` and enables adaptive allocations with the +minimum number of 3 allocations and the maximum number of 10. + +[source,console] +-------------------------------------------------- +POST _ml/trained_models/my_model/deployment/_start?deployment_id=my_model_for_search +{ + "adaptive_allocations": { + "enabled": true, + "min_number_of_allocations": 3, + "max_number_of_allocations": 10 + } +} +-------------------------------------------------- +// TEST[skip:TBD] \ No newline at end of file diff --git a/docs/reference/ml/trained-models/apis/update-trained-model-deployment.asciidoc b/docs/reference/ml/trained-models/apis/update-trained-model-deployment.asciidoc index ea5508fac26dd..d49ee3c6e872c 100644 --- a/docs/reference/ml/trained-models/apis/update-trained-model-deployment.asciidoc +++ b/docs/reference/ml/trained-models/apis/update-trained-model-deployment.asciidoc @@ -25,7 +25,11 @@ Requires the `manage_ml` cluster privilege. 
This privilege is included in the == {api-description-title} You can update a trained model deployment whose `assignment_state` is `started`. -You can either increase or decrease the number of allocations of such a deployment. +You can enable adaptive allocations to automatically scale model allocations up +and down based on the actual resource requirement of the processes. +Or you can manually increase or decrease the number of allocations of a model +deployment. + [[update-trained-model-deployments-path-parms]] == {api-path-parms-title} @@ -37,17 +41,34 @@ include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=deployment-id] [[update-trained-model-deployment-request-body]] == {api-request-body-title} +`adaptive_allocations`:: +(Optional, object) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation] + +`enabled`::: +(Optional, Boolean) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation-enabled] + +`max_number_of_allocations`::: +(Optional, integer) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation-max-number] + +`min_number_of_allocations`::: +(Optional, integer) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation-min-number] + `number_of_allocations`:: (Optional, integer) The total number of allocations this model is assigned across {ml} nodes. Increasing this value generally increases the throughput. +If `adaptive_allocations` is enabled, do not set this value, because it's automatically set. [[update-trained-model-deployment-example]] == {api-examples-title} The following example updates the deployment for a - `elastic__distilbert-base-uncased-finetuned-conll03-english` trained model to have 4 allocations: +`elastic__distilbert-base-uncased-finetuned-conll03-english` trained model to have 4 allocations: [source,console] -------------------------------------------------- @@ -84,3 +105,21 @@ The API returns the following results: } } ---- + +The following example updates the deployment for a +`elastic__distilbert-base-uncased-finetuned-conll03-english` trained model to +enable adaptive allocations with the minimum number of 3 allocations and the +maximum number of 10: + +[source,console] +-------------------------------------------------- +POST _ml/trained_models/elastic__distilbert-base-uncased-finetuned-conll03-english/deployment/_update +{ + "adaptive_allocations": { + "enabled": true, + "min_number_of_allocations": 3, + "max_number_of_allocations": 10 + } +} +-------------------------------------------------- +// TEST[skip:TBD] \ No newline at end of file From dfbedb20fab5b486b50150bd946be92816c972c1 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 1 Aug 2024 06:41:13 -0400 Subject: [PATCH 02/36] ESQL: Fix some test randomization (#111496) Fix a test that would sometimes get stuck in an infinite loop because it couldn't randomize some data. In some cases the configuration would lock it to never making changes. In that case, we have to randomize in a different way. 
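In sketch form, the safe pattern is to try the narrow mutation once and fall back to a mutation that is guaranteed to change the instance, instead of retrying until the narrow mutation happens to differ. A minimal, self-contained Java illustration (the names here are invented for the sketch; the real change is in LocalRelationSerializationTests.mutateInstance below):

    class MutationSketch {
        // Toy stand-in for a serializable node with an attribute and data.
        record Instance(int attribute, int data) {}

        static Instance mutate(Instance original, java.util.Random random) {
            if (random.nextBoolean()) {
                // Try mutating only the data. random.nextInt() can return the
                // original value, in which case nothing changed - the toy
                // analogue of a configuration that locks the data in place.
                Instance mutated = new Instance(original.attribute(), random.nextInt());
                if (mutated.equals(original) == false) {
                    return mutated;
                }
            }
            // Fallback: bumping the attribute always yields a different
            // instance, so the test cannot loop forever.
            return new Instance(original.attribute() + 1, random.nextInt());
        }
    }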
Closes #111480 --- muted-tests.yml | 3 --- ...AbstractLogicalPlanSerializationTests.java | 4 ++-- .../logical/LookupSerializationTests.java | 6 ++--- ...a => LocalRelationSerializationTests.java} | 23 ++++++++++++++----- 4 files changed, 22 insertions(+), 14 deletions(-) rename x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/local/{LocalRelationSerialiationTests.java => LocalRelationSerializationTests.java} (64%) diff --git a/muted-tests.yml b/muted-tests.yml index cbd6b14c07dd3..4635bf9541acb 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -138,9 +138,6 @@ tests: - class: org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvPSeriesWeightedSumTests method: testFold {TestCase=, } issue: https://github.com/elastic/elasticsearch/issues/111479 -- class: org.elasticsearch.xpack.esql.plan.logical.local.LocalRelationSerialiationTests - method: testEqualsAndHashcode - issue: https://github.com/elastic/elasticsearch/issues/111480 - class: org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvPSeriesWeightedSumTests method: testEvaluateBlockWithoutNulls {TestCase=, } issue: https://github.com/elastic/elasticsearch/issues/111498 diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/AbstractLogicalPlanSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/AbstractLogicalPlanSerializationTests.java index d9e11dcb61d52..a2175c3a92ab0 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/AbstractLogicalPlanSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/AbstractLogicalPlanSerializationTests.java @@ -15,7 +15,7 @@ import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.expression.function.FieldAttributeTests; import org.elasticsearch.xpack.esql.plan.AbstractNodeSerializationTests; -import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelationSerialiationTests; +import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelationSerializationTests; import java.util.ArrayList; import java.util.List; @@ -27,7 +27,7 @@ public static LogicalPlan randomChild(int depth) { return LookupSerializationTests.randomLookup(depth + 1); } // TODO more random options - return LocalRelationSerialiationTests.randomLocalRelation(); + return LocalRelationSerializationTests.randomLocalRelation(); } public static List randomFieldAttributes(int min, int max, boolean onlyRepresentable) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/LookupSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/LookupSerializationTests.java index ed9199784809b..53f36e124ebb0 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/LookupSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/LookupSerializationTests.java @@ -12,7 +12,7 @@ import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests; import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; -import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelationSerialiationTests; +import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelationSerializationTests; import java.io.IOException; import java.util.List; @@ -23,7 +23,7 @@ public static Lookup 
randomLookup(int depth) { LogicalPlan child = randomChild(depth); Expression tableName = AbstractExpressionSerializationTests.randomChild(); List matchFields = randomFieldAttributes(1, 10, false); - LocalRelation localRelation = randomBoolean() ? null : LocalRelationSerialiationTests.randomLocalRelation(); + LocalRelation localRelation = randomBoolean() ? null : LocalRelationSerializationTests.randomLocalRelation(); return new Lookup(source, child, tableName, matchFields, localRelation); } @@ -45,7 +45,7 @@ protected Lookup mutateInstance(Lookup instance) throws IOException { case 2 -> matchFields = randomValueOtherThan(matchFields, () -> randomFieldAttributes(1, 10, false)); case 3 -> localRelation = randomValueOtherThan( localRelation, - () -> randomBoolean() ? null : LocalRelationSerialiationTests.randomLocalRelation() + () -> randomBoolean() ? null : LocalRelationSerializationTests.randomLocalRelation() ); } return new Lookup(source, child, tableName, matchFields, localRelation); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/local/LocalRelationSerialiationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/local/LocalRelationSerializationTests.java similarity index 64% rename from x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/local/LocalRelationSerialiationTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/local/LocalRelationSerializationTests.java index ca5227538815f..b8fb67737ec16 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/local/LocalRelationSerialiationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/local/LocalRelationSerializationTests.java @@ -18,7 +18,7 @@ import java.io.IOException; import java.util.List; -public class LocalRelationSerialiationTests extends AbstractLogicalPlanSerializationTests { +public class LocalRelationSerializationTests extends AbstractLogicalPlanSerializationTests { public static LocalRelation randomLocalRelation() { Source source = randomSource(); List output = randomFieldAttributes(1, 10, true); @@ -45,14 +45,25 @@ protected LocalRelation createTestInstance() { @Override protected LocalRelation mutateInstance(LocalRelation instance) throws IOException { + /* + * There are two ways we could mutate this. Either we mutate just + * the data, or we mutate the attributes and the data. Some attributes + * don't *allow* for us to mutate the data. For example, if the attributes + * are all NULL typed. In that case we can't mutate the data. + * + * So we flip a coin. If that lands on true, we *try* to modify that data. + * If that spits out the same data - or if the coin lands on false - we'll + * modify the attributes and the data. 
+ */ if (randomBoolean()) { List output = instance.output(); - LocalSupplier supplier = randomValueOtherThan(instance.supplier(), () -> randomLocalSupplier(output)); - return new LocalRelation(instance.source(), output, supplier); - } else { - List output = randomValueOtherThan(instance.output(), () -> randomFieldAttributes(1, 10, true)); LocalSupplier supplier = randomLocalSupplier(output); - return new LocalRelation(instance.source(), output, supplier); + if (supplier.equals(instance.supplier()) == false) { + return new LocalRelation(instance.source(), output, supplier); + } } + List output = randomValueOtherThan(instance.output(), () -> randomFieldAttributes(1, 10, true)); + LocalSupplier supplier = randomLocalSupplier(output); + return new LocalRelation(instance.source(), output, supplier); } } From c5da25754007af51029a676a8784b16be16d8995 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Iv=C3=A1n=20Cea=20Fontenla?= Date: Thu, 1 Aug 2024 14:16:26 +0200 Subject: [PATCH 03/36] Add test cases for nulls and wrong types to aggregation tests (#111482) - Migrated the anyNullIsNull and wrong types cases to `AbstractFunctionTestCase` - Minor fixes on anyNullIsNull to work with multi-row values. Just some conditions to return a List of null instead of a null. Everything else in these functions was mostly untouched - Implemented it in some aggregations - Fixed some errors around the aggregation tests code Not all aggregations were migrated. Many of them have edge cases that don't work with some of those things. For example, if `WEIGHTED_AVG(value, weight)` has a literal on the value, it ignores the weight, which makes anyNullIsNull fail as it expects a null return. Such cases can be handled later. Closes https://github.com/elastic/elasticsearch/issues/109917 --- .../expression/function/aggregate/Max.java | 2 +- .../expression/function/aggregate/Min.java | 2 +- .../expression/function/aggregate/Values.java | 11 +- .../xpack/esql/analysis/AnalyzerTests.java | 8 +- .../xpack/esql/analysis/VerifierTests.java | 4 +- .../function/AbstractAggregationTestCase.java | 67 ++-- .../function/AbstractFunctionTestCase.java | 330 ++++++++++++++++++ .../AbstractScalarFunctionTestCase.java | 327 ----------------- .../expression/function/TestCaseSupplier.java | 4 +- .../function/aggregate/AvgTests.java | 2 +- .../function/aggregate/MaxTests.java | 73 +--- .../function/aggregate/MinTests.java | 73 +--- .../function/aggregate/ValuesTests.java | 6 +- .../function/aggregate/WeightedAvgTests.java | 18 +- 14 files changed, 417 insertions(+), 510 deletions(-) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java index 4438ccec04c4c..22224628e23ad 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java @@ -76,7 +76,7 @@ public Max replaceChildren(List newChildren) { @Override protected TypeResolution resolveType() { return TypeResolutions.isType( - this, + field(), e -> e == DataType.BOOLEAN || e == DataType.DATETIME || e == DataType.IP || (e.isNumeric() && e != DataType.UNSIGNED_LONG), sourceText(), DEFAULT, diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java index 490d227206e06..8e7bb6bc3e799 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java @@ -76,7 +76,7 @@ public Min replaceChildren(List newChildren) { @Override protected TypeResolution resolveType() { return TypeResolutions.isType( - this, + field(), e -> e == DataType.BOOLEAN || e == DataType.DATETIME || e == DataType.IP || (e.isNumeric() && e != DataType.UNSIGNED_LONG), sourceText(), DEFAULT, diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java index 79276b26be6d5..136e1233601f9 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java @@ -17,10 +17,10 @@ import org.elasticsearch.compute.aggregation.ValuesLongAggregatorFunctionSupplier; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.EsqlTypeResolutions; import org.elasticsearch.xpack.esql.expression.function.Example; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; @@ -30,6 +30,7 @@ import java.util.List; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; +import static org.elasticsearch.xpack.esql.core.type.DataType.UNSIGNED_LONG; public class Values extends AggregateFunction implements ToAggregator { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Values", Values::new); @@ -84,7 +85,13 @@ public DataType dataType() { @Override protected TypeResolution resolveType() { - return EsqlTypeResolutions.isNotSpatial(field(), sourceText(), DEFAULT); + return TypeResolutions.isType( + field(), + dt -> DataType.isSpatial(dt) == false && dt != UNSIGNED_LONG, + sourceText(), + DEFAULT, + "any type except unsigned_long and spatial types" + ); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index 7333bd0e9f8a6..f0dd72e18ac2f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -1834,13 +1834,13 @@ public void testUnsupportedTypesInStats() { line 2:20: argument of [count_distinct(x)] must be [any exact type except unsigned_long, _source, or counter types],\ found value [x] type [unsigned_long] line 2:39: argument of [max(x)] must be [boolean, datetime, ip or numeric except unsigned_long or counter types],\ - found value [max(x)] type [unsigned_long] + found value [x] type [unsigned_long] line 2:47: 
argument of [median(x)] must be [numeric except unsigned_long or counter types],\ found value [x] type [unsigned_long] line 2:58: argument of [median_absolute_deviation(x)] must be [numeric except unsigned_long or counter types],\ found value [x] type [unsigned_long] line 2:88: argument of [min(x)] must be [boolean, datetime, ip or numeric except unsigned_long or counter types],\ - found value [min(x)] type [unsigned_long] + found value [x] type [unsigned_long] line 2:96: first argument of [percentile(x, 10)] must be [numeric except unsigned_long],\ found value [x] type [unsigned_long] line 2:115: argument of [sum(x)] must be [numeric except unsigned_long or counter types],\ @@ -1854,13 +1854,13 @@ public void testUnsupportedTypesInStats() { line 2:10: argument of [avg(x)] must be [numeric except unsigned_long or counter types],\ found value [x] type [version] line 2:18: argument of [max(x)] must be [boolean, datetime, ip or numeric except unsigned_long or counter types],\ - found value [max(x)] type [version] + found value [x] type [version] line 2:26: argument of [median(x)] must be [numeric except unsigned_long or counter types],\ found value [x] type [version] line 2:37: argument of [median_absolute_deviation(x)] must be [numeric except unsigned_long or counter types],\ found value [x] type [version] line 2:67: argument of [min(x)] must be [boolean, datetime, ip or numeric except unsigned_long or counter types],\ - found value [min(x)] type [version] + found value [x] type [version] line 2:75: first argument of [percentile(x, 10)] must be [numeric except unsigned_long], found value [x] type [version] line 2:94: argument of [sum(x)] must be [numeric except unsigned_long or counter types], found value [x] type [version]"""); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java index 08b1ef9f6fef6..49372da04d8c3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java @@ -494,7 +494,7 @@ public void testAggregateOnCounter() { equalTo( "1:20: argument of [min(network.bytes_in)] must be" + " [boolean, datetime, ip or numeric except unsigned_long or counter types]," - + " found value [min(network.bytes_in)] type [counter_long]" + + " found value [network.bytes_in] type [counter_long]" ) ); @@ -503,7 +503,7 @@ public void testAggregateOnCounter() { equalTo( "1:20: argument of [max(network.bytes_in)] must be" + " [boolean, datetime, ip or numeric except unsigned_long or counter types]," - + " found value [max(network.bytes_in)] type [counter_long]" + + " found value [network.bytes_in] type [counter_long]" ) ); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java index 25ff4f9c2122d..65425486ea4e0 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java @@ -57,6 +57,25 @@ public abstract class AbstractAggregationTestCase extends AbstractFunctionTestCa * Use if possible, as this method may get updated with new checks in the future. *
*/ + protected static Iterable parameterSuppliersFromTypedDataWithDefaultChecks( + List suppliers, + boolean entirelyNullPreservesType, + PositionalErrorMessageSupplier positionalErrorMessageSupplier + ) { + return parameterSuppliersFromTypedData( + errorsForCasesWithoutExamples( + withNoRowsExpectingNull(anyNullIsNull(entirelyNullPreservesType, randomizeBytesRefsOffset(suppliers))), + positionalErrorMessageSupplier + ) + ); + } + + // TODO: Remove and migrate everything to the method with all the parameters + /** + * @deprecated Use {@link #parameterSuppliersFromTypedDataWithDefaultChecks(List, boolean, PositionalErrorMessageSupplier)} instead. + * This method doesn't add all the default checks. + */ + @Deprecated protected static Iterable parameterSuppliersFromTypedDataWithDefaultChecks(List suppliers) { return parameterSuppliersFromTypedData(withNoRowsExpectingNull(randomizeBytesRefsOffset(suppliers))); } @@ -119,24 +138,9 @@ public void testFold() { Expression expression = buildLiteralExpression(testCase); resolveExpression(expression, aggregatorFunctionSupplier -> { - // An aggregation cannot be folded - }, evaluableExpression -> { - assertTrue(evaluableExpression.foldable()); - if (testCase.foldingExceptionClass() == null) { - Object result = evaluableExpression.fold(); - // Decode unsigned longs into BigIntegers - if (testCase.expectedType() == DataType.UNSIGNED_LONG && result != null) { - result = NumericUtils.unsignedLongAsBigInteger((Long) result); - } - assertThat(result, testCase.getMatcher()); - if (testCase.getExpectedWarnings() != null) { - assertWarnings(testCase.getExpectedWarnings()); - } - } else { - Throwable t = expectThrows(testCase.foldingExceptionClass(), evaluableExpression::fold); - assertThat(t.getMessage(), equalTo(testCase.foldingExceptionMessage())); - } - }); + // An aggregation cannot be folded. + // It's not an error either as not all aggregations are foldable. 
+ }, this::evaluate); } private void aggregateSingleMode(Expression expression) { @@ -263,13 +267,19 @@ private void aggregateWithIntermediates(Expression expression) { } private void evaluate(Expression evaluableExpression) { - Object result; - try (var evaluator = evaluator(evaluableExpression).get(driverContext())) { - try (Block block = evaluator.eval(row(testCase.getDataValues()))) { - result = toJavaObjectUnsignedLongAware(block, 0); - } + assertTrue(evaluableExpression.foldable()); + + if (testCase.foldingExceptionClass() != null) { + Throwable t = expectThrows(testCase.foldingExceptionClass(), evaluableExpression::fold); + assertThat(t.getMessage(), equalTo(testCase.foldingExceptionMessage())); + return; } + Object result = evaluableExpression.fold(); + // Decode unsigned longs into BigIntegers + if (testCase.expectedType() == DataType.UNSIGNED_LONG && result != null) { + result = NumericUtils.unsignedLongAsBigInteger((Long) result); + } assertThat(result, not(equalTo(Double.NaN))); assert testCase.getMatcher().matches(Double.POSITIVE_INFINITY) == false; assertThat(result, not(equalTo(Double.POSITIVE_INFINITY))); @@ -435,16 +445,23 @@ private IntBlock makeGroupsVector(int groupStart, int groupEnd, int rowCount) { */ private void processPageGrouping(GroupingAggregator aggregator, Page inputPage, int groupCount) { var groupSliceSize = 1; + var allValuesNull = IntStream.range(0, inputPage.getBlockCount()) + .mapToObj(inputPage::getBlock) + .anyMatch(Block::areAllValuesNull); // Add data to chunks of groups for (int currentGroupOffset = 0; currentGroupOffset < groupCount;) { - var seenGroupIds = new SeenGroupIds.Range(0, currentGroupOffset + groupSliceSize); + int groupSliceRemainingSize = Math.min(groupSliceSize, groupCount - currentGroupOffset); + var seenGroupIds = new SeenGroupIds.Range(0, allValuesNull ? 
0 : currentGroupOffset + groupSliceRemainingSize); var addInput = aggregator.prepareProcessPage(seenGroupIds, inputPage); var positionCount = inputPage.getPositionCount(); var dataSliceSize = 1; // Divide data in chunks for (int currentDataOffset = 0; currentDataOffset < positionCount;) { - try (var groups = makeGroupsVector(currentGroupOffset, currentGroupOffset + groupSliceSize, dataSliceSize)) { + int dataSliceRemainingSize = Math.min(dataSliceSize, positionCount - currentDataOffset); + try ( + var groups = makeGroupsVector(currentGroupOffset, currentGroupOffset + groupSliceRemainingSize, dataSliceRemainingSize) + ) { addInput.add(currentDataOffset, groups); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index 20c583d3ac898..0c4bd6fe38b6a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -40,6 +40,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.expression.Literal; +import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNotNull; import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNull; import org.elasticsearch.xpack.esql.core.session.Configuration; @@ -49,6 +50,8 @@ import org.elasticsearch.xpack.esql.core.util.NumericUtils; import org.elasticsearch.xpack.esql.core.util.StringUtils; import org.elasticsearch.xpack.esql.evaluator.EvalMapper; +import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Greatest; +import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; import org.elasticsearch.xpack.esql.expression.function.scalar.string.RLike; import org.elasticsearch.xpack.esql.expression.function.scalar.string.WildcardLike; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Add; @@ -69,6 +72,7 @@ import org.elasticsearch.xpack.esql.planner.Layout; import org.elasticsearch.xpack.esql.planner.PlannerUtils; import org.elasticsearch.xpack.versionfield.Version; +import org.hamcrest.Matcher; import org.junit.After; import org.junit.AfterClass; @@ -95,6 +99,8 @@ import java.util.TreeSet; import java.util.function.Function; import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; import static java.util.Map.entry; import static org.elasticsearch.compute.data.BlockUtils.toJavaObject; @@ -106,6 +112,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.nullValue; /** * Base class for function tests. @@ -191,6 +198,318 @@ protected static Iterable parameterSuppliersFromTypedData(List + * Note: This won't add more than a single null to any existing test case, + * just to keep the number of test cases from exploding totally. + *
+ * + * @param entirelyNullPreservesType should a test case that only contains parameters + * with the {@code null} type keep it's expected type? + * This is mostly going to be {@code true} + * except for functions that base their type entirely + * on input types like {@link Greatest} or {@link Coalesce}. + */ + protected static List anyNullIsNull(boolean entirelyNullPreservesType, List testCaseSuppliers) { + return anyNullIsNull( + testCaseSuppliers, + (nullPosition, nullValueDataType, original) -> entirelyNullPreservesType == false + && nullValueDataType == DataType.NULL + && original.getData().size() == 1 ? DataType.NULL : original.expectedType(), + (nullPosition, nullData, original) -> original + ); + } + + public interface ExpectedType { + DataType expectedType(int nullPosition, DataType nullValueDataType, TestCaseSupplier.TestCase original); + } + + public interface ExpectedEvaluatorToString { + Matcher evaluatorToString(int nullPosition, TestCaseSupplier.TypedData nullData, Matcher original); + } + + protected static List anyNullIsNull( + List testCaseSuppliers, + ExpectedType expectedType, + ExpectedEvaluatorToString evaluatorToString + ) { + typesRequired(testCaseSuppliers); + List suppliers = new ArrayList<>(testCaseSuppliers.size()); + suppliers.addAll(testCaseSuppliers); + + /* + * For each original test case, add as many copies as there were + * arguments, replacing one of the arguments with null and keeping + * the others. + * + * Also, if this was the first time we saw the signature we copy it + * *again*, replacing the argument with null, but annotating the + * argument's type as `null` explicitly. + */ + Set> uniqueSignatures = new HashSet<>(); + for (TestCaseSupplier original : testCaseSuppliers) { + boolean firstTimeSeenSignature = uniqueSignatures.add(original.types()); + for (int nullPosition = 0; nullPosition < original.types().size(); nullPosition++) { + int finalNullPosition = nullPosition; + suppliers.add(new TestCaseSupplier(original.name() + " null in " + nullPosition, original.types(), () -> { + TestCaseSupplier.TestCase oc = original.get(); + List data = IntStream.range(0, oc.getData().size()).mapToObj(i -> { + TestCaseSupplier.TypedData od = oc.getData().get(i); + if (i != finalNullPosition) { + return od; + } + return od.withData(od.isMultiRow() ? Collections.singletonList(null) : null); + }).toList(); + TestCaseSupplier.TypedData nulledData = oc.getData().get(finalNullPosition); + return new TestCaseSupplier.TestCase( + data, + evaluatorToString.evaluatorToString(finalNullPosition, nulledData, oc.evaluatorToString()), + expectedType.expectedType(finalNullPosition, nulledData.type(), oc), + nullValue(), + null, + oc.getExpectedTypeError(), + null, + null + ); + })); + + if (firstTimeSeenSignature) { + List typesWithNull = IntStream.range(0, original.types().size()) + .mapToObj(i -> i == finalNullPosition ? DataType.NULL : original.types().get(i)) + .toList(); + boolean newSignature = uniqueSignatures.add(typesWithNull); + if (newSignature) { + suppliers.add(new TestCaseSupplier(typesWithNull, () -> { + TestCaseSupplier.TestCase oc = original.get(); + List data = IntStream.range(0, oc.getData().size()) + .mapToObj( + i -> i == finalNullPosition + ? (oc.getData().get(i).isMultiRow() + ? 
TestCaseSupplier.TypedData.MULTI_ROW_NULL + : TestCaseSupplier.TypedData.NULL) + : oc.getData().get(i) + ) + .toList(); + return new TestCaseSupplier.TestCase( + data, + equalTo("LiteralsEvaluator[lit=null]"), + expectedType.expectedType(finalNullPosition, DataType.NULL, oc), + nullValue(), + null, + oc.getExpectedTypeError(), + null, + null + ); + })); + } + } + } + } + + return suppliers; + } + + @FunctionalInterface + protected interface PositionalErrorMessageSupplier { + /** + * This interface defines functions to supply error messages for incorrect types in specific positions. Functions which have + * the same type requirements for all positions can simplify this with a lambda returning a string constant. + * + * @param validForPosition - the set of {@link DataType}s that the test infrastructure believes to be allowable in the + * given position. + * @param position - the zero-index position in the list of parameters the function has detected the bad argument to be. + * @return The string describing the acceptable parameters for that position. Note that this function should not return + * the full error string; that will be constructed by the test. Just return the type string for that position. + */ + String apply(Set validForPosition, int position); + } + + /** + * Adds test cases containing unsupported parameter types that assert + * that they throw type errors. + */ + protected static List errorsForCasesWithoutExamples( + List testCaseSuppliers, + PositionalErrorMessageSupplier positionalErrorMessageSupplier + ) { + return errorsForCasesWithoutExamples(testCaseSuppliers, (i, v, t) -> typeErrorMessage(i, v, t, positionalErrorMessageSupplier)); + } + + /** + * Build the expected error message for an invalid type signature. + */ + protected static String typeErrorMessage( + boolean includeOrdinal, + List> validPerPosition, + List types, + PositionalErrorMessageSupplier expectedTypeSupplier + ) { + int badArgPosition = -1; + for (int i = 0; i < types.size(); i++) { + if (validPerPosition.get(i).contains(types.get(i)) == false) { + badArgPosition = i; + break; + } + } + if (badArgPosition == -1) { + throw new IllegalStateException( + "Can't generate error message for these types, you probably need a custom error message function" + ); + } + String ordinal = includeOrdinal ? TypeResolutions.ParamOrdinal.fromIndex(badArgPosition).name().toLowerCase(Locale.ROOT) + " " : ""; + String expectedTypeString = expectedTypeSupplier.apply(validPerPosition.get(badArgPosition), badArgPosition); + String name = types.get(badArgPosition).typeName(); + return ordinal + "argument of [] must be [" + expectedTypeString + "], found value [" + name + "] type [" + name + "]"; + } + + @FunctionalInterface + protected interface TypeErrorMessageSupplier { + String apply(boolean includeOrdinal, List> validPerPosition, List types); + } + + protected static List errorsForCasesWithoutExamples( + List testCaseSuppliers, + TypeErrorMessageSupplier typeErrorMessageSupplier + ) { + typesRequired(testCaseSuppliers); + List suppliers = new ArrayList<>(testCaseSuppliers.size()); + suppliers.addAll(testCaseSuppliers); + + Set> valid = testCaseSuppliers.stream().map(TestCaseSupplier::types).collect(Collectors.toSet()); + List> validPerPosition = validPerPosition(valid); + + testCaseSuppliers.stream() + .map(s -> s.types().size()) + .collect(Collectors.toSet()) + .stream() + .flatMap(count -> allPermutations(count)) + .filter(types -> valid.contains(types) == false) + /* + * Skip any cases with more than one null. 
Our tests don't generate + * the full combinatorial explosions of all nulls - just a single null. + * Hopefully , cases will function the same as , + * cases. + */.filter(types -> types.stream().filter(t -> t == DataType.NULL).count() <= 1) + .map(types -> typeErrorSupplier(validPerPosition.size() != 1, validPerPosition, types, typeErrorMessageSupplier)) + .forEach(suppliers::add); + return suppliers; + } + + private static List append(List orig, DataType extra) { + List longer = new ArrayList<>(orig.size() + 1); + longer.addAll(orig); + longer.add(extra); + return longer; + } + + protected static Stream representable() { + return DataType.types().stream().filter(DataType::isRepresentable); + } + + protected static TestCaseSupplier typeErrorSupplier( + boolean includeOrdinal, + List> validPerPosition, + List types, + PositionalErrorMessageSupplier errorMessageSupplier + ) { + return typeErrorSupplier(includeOrdinal, validPerPosition, types, (o, v, t) -> typeErrorMessage(o, v, t, errorMessageSupplier)); + } + + /** + * Build a test case that asserts that the combination of parameter types is an error. + */ + protected static TestCaseSupplier typeErrorSupplier( + boolean includeOrdinal, + List> validPerPosition, + List types, + TypeErrorMessageSupplier errorMessageSupplier + ) { + return new TestCaseSupplier( + "type error for " + TestCaseSupplier.nameFromTypes(types), + types, + () -> TestCaseSupplier.TestCase.typeError( + types.stream().map(type -> new TestCaseSupplier.TypedData(randomLiteral(type).value(), type, type.typeName())).toList(), + errorMessageSupplier.apply(includeOrdinal, validPerPosition, types) + ) + ); + } + + private static List> validPerPosition(Set> valid) { + int max = valid.stream().mapToInt(List::size).max().getAsInt(); + List> result = new ArrayList<>(max); + for (int i = 0; i < max; i++) { + result.add(new HashSet<>()); + } + for (List signature : valid) { + for (int i = 0; i < signature.size(); i++) { + result.get(i).add(signature.get(i)); + } + } + return result; + } + + protected static Stream> allPermutations(int argumentCount) { + if (argumentCount == 0) { + return Stream.of(List.of()); + } + if (argumentCount > 3) { + throw new IllegalArgumentException("would generate too many combinations"); + } + Stream> stream = validFunctionParameters().map(List::of); + for (int i = 1; i < argumentCount; i++) { + stream = stream.flatMap(types -> validFunctionParameters().map(t -> append(types, t))); + } + return stream; + } + + /** + * The types that are valid in function parameters. This is used by the + * function tests to enumerate all possible parameters to test error messages + * for invalid combinations. + */ + public static Stream validFunctionParameters() { + return Arrays.stream(DataType.values()).filter(t -> { + if (t == DataType.UNSUPPORTED) { + // By definition, functions never support UNSUPPORTED + return false; + } + if (t == DataType.DOC_DATA_TYPE || t == DataType.PARTIAL_AGG) { + /* + * Doc and partial_agg are special and functions aren't + * defined to take these. They'll use them implicitly if needed. + */ + return false; + } + if (t == DataType.OBJECT || t == DataType.NESTED) { + // Object and nested fields aren't supported by any functions yet + return false; + } + if (t == DataType.SOURCE || t == DataType.TSID_DATA_TYPE) { + // No functions take source or tsid fields yet. We'll make some eventually and remove this. 
+ return false; + } + if (t == DataType.DATE_PERIOD || t == DataType.TIME_DURATION) { + // We don't test that functions don't take date_period or time_duration. We should. + return false; + } + if (t.isCounter()) { + /* + * For now, we're assuming no functions take counters + * as parameters. That's not true - some do. But we'll + * need to update the tests to handle that. + */ + return false; + } + if (t.widenSmallNumeric() != t) { + // Small numeric types are widened long before they arrive at functions. + return false; + } + + return true; + }).sorted(); + } + /** * Build an {@link Attribute} that loads a field. */ @@ -997,6 +1316,17 @@ protected static DataType[] strings() { return DataType.types().stream().filter(DataType::isString).toArray(DataType[]::new); } + /** + * Validate that we know the types for all the test cases already created + * @param suppliers - list of suppliers before adding in the illegal type combinations + */ + protected static void typesRequired(List suppliers) { + String bad = suppliers.stream().filter(s -> s.types() == null).map(s -> s.name()).collect(Collectors.joining("\n")); + if (bad.equals("") == false) { + throw new IllegalArgumentException("types required but not found for these tests:\n" + bad); + } + } + /** * Returns true if the current test case is for an aggregation function. *
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractScalarFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractScalarFunctionTestCase.java index 1caea78e79ad5..f4123af8abd0a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractScalarFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractScalarFunctionTestCase.java @@ -22,21 +22,15 @@ import org.elasticsearch.indices.CrankyCircuitBreakerService; import org.elasticsearch.xpack.esql.TestBlockFactory; import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.util.NumericUtils; -import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Greatest; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.AbstractMultivalueFunctionTestCase; -import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; import org.elasticsearch.xpack.esql.optimizer.FoldNull; import org.elasticsearch.xpack.esql.planner.PlannerUtils; import org.hamcrest.Matcher; import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashSet; import java.util.List; -import java.util.Locale; import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; @@ -44,7 +38,6 @@ import java.util.concurrent.Future; import java.util.stream.Collectors; import java.util.stream.IntStream; -import java.util.stream.Stream; import static org.elasticsearch.compute.data.BlockUtils.toJavaObject; import static org.hamcrest.Matchers.either; @@ -372,152 +365,6 @@ public final void testFold() { } } - /** - * Adds cases with {@code null} and asserts that the result is {@code null}. - *
- * Note: This won't add more than a single null to any existing test case, - * just to keep the number of test cases from exploding totally. - *
- * - * @param entirelyNullPreservesType should a test case that only contains parameters - * with the {@code null} type keep it's expected type? - * This is mostly going to be {@code true} - * except for functions that base their type entirely - * on input types like {@link Greatest} or {@link Coalesce}. - */ - protected static List anyNullIsNull(boolean entirelyNullPreservesType, List testCaseSuppliers) { - return anyNullIsNull( - testCaseSuppliers, - (nullPosition, nullValueDataType, original) -> entirelyNullPreservesType == false - && nullValueDataType == DataType.NULL - && original.getData().size() == 1 ? DataType.NULL : original.expectedType(), - (nullPosition, nullData, original) -> original - ); - } - - public interface ExpectedType { - DataType expectedType(int nullPosition, DataType nullValueDataType, TestCaseSupplier.TestCase original); - } - - public interface ExpectedEvaluatorToString { - Matcher evaluatorToString(int nullPosition, TestCaseSupplier.TypedData nullData, Matcher original); - } - - protected static List anyNullIsNull( - List testCaseSuppliers, - ExpectedType expectedType, - ExpectedEvaluatorToString evaluatorToString - ) { - typesRequired(testCaseSuppliers); - List suppliers = new ArrayList<>(testCaseSuppliers.size()); - suppliers.addAll(testCaseSuppliers); - - /* - * For each original test case, add as many copies as there were - * arguments, replacing one of the arguments with null and keeping - * the others. - * - * Also, if this was the first time we saw the signature we copy it - * *again*, replacing the argument with null, but annotating the - * argument's type as `null` explicitly. - */ - Set> uniqueSignatures = new HashSet<>(); - for (TestCaseSupplier original : testCaseSuppliers) { - boolean firstTimeSeenSignature = uniqueSignatures.add(original.types()); - for (int nullPosition = 0; nullPosition < original.types().size(); nullPosition++) { - int finalNullPosition = nullPosition; - suppliers.add(new TestCaseSupplier(original.name() + " null in " + nullPosition, original.types(), () -> { - TestCaseSupplier.TestCase oc = original.get(); - List data = IntStream.range(0, oc.getData().size()).mapToObj(i -> { - TestCaseSupplier.TypedData od = oc.getData().get(i); - return i == finalNullPosition ? od.withData(null) : od; - }).toList(); - TestCaseSupplier.TypedData nulledData = oc.getData().get(finalNullPosition); - return new TestCaseSupplier.TestCase( - data, - evaluatorToString.evaluatorToString(finalNullPosition, nulledData, oc.evaluatorToString()), - expectedType.expectedType(finalNullPosition, nulledData.type(), oc), - nullValue(), - null, - oc.getExpectedTypeError(), - null, - null - ); - })); - - if (firstTimeSeenSignature) { - List typesWithNull = IntStream.range(0, original.types().size()) - .mapToObj(i -> i == finalNullPosition ? DataType.NULL : original.types().get(i)) - .toList(); - boolean newSignature = uniqueSignatures.add(typesWithNull); - if (newSignature) { - suppliers.add(new TestCaseSupplier(typesWithNull, () -> { - TestCaseSupplier.TestCase oc = original.get(); - List data = IntStream.range(0, oc.getData().size()) - .mapToObj(i -> i == finalNullPosition ? 
TestCaseSupplier.TypedData.NULL : oc.getData().get(i)) - .toList(); - return new TestCaseSupplier.TestCase( - data, - equalTo("LiteralsEvaluator[lit=null]"), - expectedType.expectedType(finalNullPosition, DataType.NULL, oc), - nullValue(), - null, - oc.getExpectedTypeError(), - null, - null - ); - })); - } - } - } - } - - return suppliers; - - } - - /** - * Adds test cases containing unsupported parameter types that assert - * that they throw type errors. - */ - protected static List errorsForCasesWithoutExamples( - List testCaseSuppliers, - PositionalErrorMessageSupplier positionalErrorMessageSupplier - ) { - return errorsForCasesWithoutExamples( - testCaseSuppliers, - (i, v, t) -> AbstractScalarFunctionTestCase.typeErrorMessage(i, v, t, positionalErrorMessageSupplier) - ); - } - - protected static List errorsForCasesWithoutExamples( - List testCaseSuppliers, - TypeErrorMessageSupplier typeErrorMessageSupplier - ) { - typesRequired(testCaseSuppliers); - List suppliers = new ArrayList<>(testCaseSuppliers.size()); - suppliers.addAll(testCaseSuppliers); - - Set> valid = testCaseSuppliers.stream().map(TestCaseSupplier::types).collect(Collectors.toSet()); - List> validPerPosition = validPerPosition(valid); - - testCaseSuppliers.stream() - .map(s -> s.types().size()) - .collect(Collectors.toSet()) - .stream() - .flatMap(count -> allPermutations(count)) - .filter(types -> valid.contains(types) == false) - /* - * Skip any cases with more than one null. Our tests don't generate - * the full combinatorial explosions of all nulls - just a single null. - * Hopefully , cases will function the same as , - * cases. - */.filter(types -> types.stream().filter(t -> t == DataType.NULL).count() <= 1) - .map(types -> typeErrorSupplier(validPerPosition.size() != 1, validPerPosition, types, typeErrorMessageSupplier)) - .forEach(suppliers::add); - return suppliers; - } - public static String errorMessageStringForBinaryOperators( boolean includeOrdinal, List> validPerPosition, @@ -572,178 +419,4 @@ protected static List failureForCasesWithoutExamples(List suppliers) { - String bad = suppliers.stream().filter(s -> s.types() == null).map(s -> s.name()).collect(Collectors.joining("\n")); - if (bad.equals("") == false) { - throw new IllegalArgumentException("types required but not found for these tests:\n" + bad); - } - } - - private static List> validPerPosition(Set> valid) { - int max = valid.stream().mapToInt(List::size).max().getAsInt(); - List> result = new ArrayList<>(max); - for (int i = 0; i < max; i++) { - result.add(new HashSet<>()); - } - for (List signature : valid) { - for (int i = 0; i < signature.size(); i++) { - result.get(i).add(signature.get(i)); - } - } - return result; - } - - private static Stream> allPermutations(int argumentCount) { - if (argumentCount == 0) { - return Stream.of(List.of()); - } - if (argumentCount > 3) { - throw new IllegalArgumentException("would generate too many combinations"); - } - Stream> stream = validFunctionParameters().map(List::of); - for (int i = 1; i < argumentCount; i++) { - stream = stream.flatMap(types -> validFunctionParameters().map(t -> append(types, t))); - } - return stream; - } - - private static List append(List orig, DataType extra) { - List longer = new ArrayList<>(orig.size() + 1); - longer.addAll(orig); - longer.add(extra); - return longer; - } - - @FunctionalInterface - protected interface TypeErrorMessageSupplier { - String apply(boolean includeOrdinal, List> validPerPosition, List types); - } - - @FunctionalInterface - protected interface 
PositionalErrorMessageSupplier { - /** - * This interface defines functions to supply error messages for incorrect types in specific positions. Functions which have - * the same type requirements for all positions can simplify this with a lambda returning a string constant. - * - * @param validForPosition - the set of {@link DataType}s that the test infrastructure believes to be allowable in the - * given position. - * @param position - the zero-index position in the list of parameters the function has detected the bad argument to be. - * @return The string describing the acceptable parameters for that position. Note that this function should not return - * the full error string; that will be constructed by the test. Just return the type string for that position. - */ - String apply(Set validForPosition, int position); - } - - protected static TestCaseSupplier typeErrorSupplier( - boolean includeOrdinal, - List> validPerPosition, - List types, - PositionalErrorMessageSupplier errorMessageSupplier - ) { - return typeErrorSupplier( - includeOrdinal, - validPerPosition, - types, - (o, v, t) -> AbstractScalarFunctionTestCase.typeErrorMessage(o, v, t, errorMessageSupplier) - ); - } - - /** - * Build a test case that asserts that the combination of parameter types is an error. - */ - protected static TestCaseSupplier typeErrorSupplier( - boolean includeOrdinal, - List> validPerPosition, - List types, - TypeErrorMessageSupplier errorMessageSupplier - ) { - return new TestCaseSupplier( - "type error for " + TestCaseSupplier.nameFromTypes(types), - types, - () -> TestCaseSupplier.TestCase.typeError( - types.stream().map(type -> new TestCaseSupplier.TypedData(randomLiteral(type).value(), type, type.typeName())).toList(), - errorMessageSupplier.apply(includeOrdinal, validPerPosition, types) - ) - ); - } - - /** - * Build the expected error message for an invalid type signature. - */ - protected static String typeErrorMessage( - boolean includeOrdinal, - List> validPerPosition, - List types, - PositionalErrorMessageSupplier expectedTypeSupplier - ) { - int badArgPosition = -1; - for (int i = 0; i < types.size(); i++) { - if (validPerPosition.get(i).contains(types.get(i)) == false) { - badArgPosition = i; - break; - } - } - if (badArgPosition == -1) { - throw new IllegalStateException( - "Can't generate error message for these types, you probably need a custom error message function" - ); - } - String ordinal = includeOrdinal ? TypeResolutions.ParamOrdinal.fromIndex(badArgPosition).name().toLowerCase(Locale.ROOT) + " " : ""; - String expectedTypeString = expectedTypeSupplier.apply(validPerPosition.get(badArgPosition), badArgPosition); - String name = types.get(badArgPosition).typeName(); - return ordinal + "argument of [] must be [" + expectedTypeString + "], found value [" + name + "] type [" + name + "]"; - } - - /** - * The types that are valid in function parameters. This is used by the - * function tests to enumerate all possible parameters to test error messages - * for invalid combinations. - */ - public static Stream validFunctionParameters() { - return Arrays.stream(DataType.values()).filter(t -> { - if (t == DataType.UNSUPPORTED) { - // By definition, functions never support UNSUPPORTED - return false; - } - if (t == DataType.DOC_DATA_TYPE || t == DataType.PARTIAL_AGG) { - /* - * Doc and partial_agg are special and functions aren't - * defined to take these. They'll use them implicitly if needed. 
- */ - return false; - } - if (t == DataType.OBJECT || t == DataType.NESTED) { - // Object and nested fields aren't supported by any functions yet - return false; - } - if (t == DataType.SOURCE || t == DataType.TSID_DATA_TYPE) { - // No functions take source or tsid fields yet. We'll make some eventually and remove this. - return false; - } - if (t == DataType.DATE_PERIOD || t == DataType.TIME_DURATION) { - // We don't test that functions don't take date_period or time_duration. We should. - return false; - } - if (t.isCounter()) { - /* - * For now, we're assuming no functions take counters - * as parameters. That's not true - some do. But we'll - * need to update the tests to handle that. - */ - return false; - } - if (t.widenSmallNumeric() != t) { - // Small numeric types are widened long before they arrive at functions. - return false; - } - - return true; - }).sorted(); - } - } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java index 3585e58bf97ab..6652cca0c4527 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java @@ -30,6 +30,7 @@ import java.time.Period; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.function.BiFunction; import java.util.function.BinaryOperator; @@ -1455,6 +1456,7 @@ public TypedData get() { */ public static class TypedData { public static final TypedData NULL = new TypedData(null, DataType.NULL, ""); + public static final TypedData MULTI_ROW_NULL = TypedData.multiRow(Collections.singletonList(null), DataType.NULL, ""); private final Object data; private final DataType type; @@ -1583,7 +1585,7 @@ public Literal asLiteral() { throw new IllegalStateException("Multirow values require exactly 1 element to be a literal, got " + values.size()); } - return new Literal(Source.synthetic(name), values, type); + return new Literal(Source.synthetic(name), values.get(0), type); } return new Literal(Source.synthetic(name), data, type); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AvgTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AvgTests.java index f456bd409059a..80737dac1aa58 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AvgTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AvgTests.java @@ -53,7 +53,7 @@ public static Iterable parameters() { ) ); - return parameterSuppliersFromTypedDataWithDefaultChecks(suppliers); + return parameterSuppliersFromTypedDataWithDefaultChecks(suppliers, true, (v, p) -> "numeric except unsigned_long or counter types"); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxTests.java index 1d489e0146ad3..52e908a51dd1e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxTests.java +++ 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxTests.java @@ -49,73 +49,6 @@ public static Iterable parameters() { suppliers.addAll( List.of( - // Surrogates - new TestCaseSupplier( - List.of(DataType.INTEGER), - () -> new TestCaseSupplier.TestCase( - List.of(TestCaseSupplier.TypedData.multiRow(List.of(5, 8, -2, 0, 200), DataType.INTEGER, "field")), - "Max[field=Attribute[channel=0]]", - DataType.INTEGER, - equalTo(200) - ) - ), - new TestCaseSupplier( - List.of(DataType.LONG), - () -> new TestCaseSupplier.TestCase( - List.of(TestCaseSupplier.TypedData.multiRow(List.of(5L, 8L, -2L, 0L, 200L), DataType.LONG, "field")), - "Max[field=Attribute[channel=0]]", - DataType.LONG, - equalTo(200L) - ) - ), - new TestCaseSupplier( - List.of(DataType.DOUBLE), - () -> new TestCaseSupplier.TestCase( - List.of(TestCaseSupplier.TypedData.multiRow(List.of(5., 8., -2., 0., 200.), DataType.DOUBLE, "field")), - "Max[field=Attribute[channel=0]]", - DataType.DOUBLE, - equalTo(200.) - ) - ), - new TestCaseSupplier( - List.of(DataType.DATETIME), - () -> new TestCaseSupplier.TestCase( - List.of(TestCaseSupplier.TypedData.multiRow(List.of(5L, 8L, 2L, 0L, 200L), DataType.DATETIME, "field")), - "Max[field=Attribute[channel=0]]", - DataType.DATETIME, - equalTo(200L) - ) - ), - new TestCaseSupplier( - List.of(DataType.BOOLEAN), - () -> new TestCaseSupplier.TestCase( - List.of(TestCaseSupplier.TypedData.multiRow(List.of(true, false, false, true), DataType.BOOLEAN, "field")), - "Max[field=Attribute[channel=0]]", - DataType.BOOLEAN, - equalTo(true) - ) - ), - new TestCaseSupplier( - List.of(DataType.IP), - () -> new TestCaseSupplier.TestCase( - List.of( - TestCaseSupplier.TypedData.multiRow( - List.of( - new BytesRef(InetAddressPoint.encode(InetAddresses.forString("127.0.0.1"))), - new BytesRef(InetAddressPoint.encode(InetAddresses.forString("::1"))), - new BytesRef(InetAddressPoint.encode(InetAddresses.forString("::"))), - new BytesRef(InetAddressPoint.encode(InetAddresses.forString("ffff::"))) - ), - DataType.IP, - "field" - ) - ), - "Max[field=Attribute[channel=0]]", - DataType.IP, - equalTo(new BytesRef(InetAddressPoint.encode(InetAddresses.forString("ffff::")))) - ) - ), - // Folding new TestCaseSupplier( List.of(DataType.INTEGER), @@ -180,7 +113,11 @@ public static Iterable parameters() { ) ); - return parameterSuppliersFromTypedDataWithDefaultChecks(suppliers); + return parameterSuppliersFromTypedDataWithDefaultChecks( + suppliers, + false, + (v, p) -> "boolean, datetime, ip or numeric except unsigned_long or counter types" + ); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinTests.java index b5fb5b2c1c414..9514c817df497 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinTests.java @@ -49,73 +49,6 @@ public static Iterable parameters() { suppliers.addAll( List.of( - // Surrogates - new TestCaseSupplier( - List.of(DataType.INTEGER), - () -> new TestCaseSupplier.TestCase( - List.of(TestCaseSupplier.TypedData.multiRow(List.of(5, 8, -2, 0, 200), DataType.INTEGER, "field")), - "Min[field=Attribute[channel=0]]", - DataType.INTEGER, - equalTo(-2) - ) - ), - new TestCaseSupplier( - List.of(DataType.LONG), - () -> new 
TestCaseSupplier.TestCase( - List.of(TestCaseSupplier.TypedData.multiRow(List.of(5L, 8L, -2L, 0L, 200L), DataType.LONG, "field")), - "Min[field=Attribute[channel=0]]", - DataType.LONG, - equalTo(-2L) - ) - ), - new TestCaseSupplier( - List.of(DataType.DOUBLE), - () -> new TestCaseSupplier.TestCase( - List.of(TestCaseSupplier.TypedData.multiRow(List.of(5., 8., -2., 0., 200.), DataType.DOUBLE, "field")), - "Min[field=Attribute[channel=0]]", - DataType.DOUBLE, - equalTo(-2.) - ) - ), - new TestCaseSupplier( - List.of(DataType.DATETIME), - () -> new TestCaseSupplier.TestCase( - List.of(TestCaseSupplier.TypedData.multiRow(List.of(5L, 8L, 2L, 0L, 200L), DataType.DATETIME, "field")), - "Min[field=Attribute[channel=0]]", - DataType.DATETIME, - equalTo(0L) - ) - ), - new TestCaseSupplier( - List.of(DataType.BOOLEAN), - () -> new TestCaseSupplier.TestCase( - List.of(TestCaseSupplier.TypedData.multiRow(List.of(true, false, false, true), DataType.BOOLEAN, "field")), - "Min[field=Attribute[channel=0]]", - DataType.BOOLEAN, - equalTo(false) - ) - ), - new TestCaseSupplier( - List.of(DataType.IP), - () -> new TestCaseSupplier.TestCase( - List.of( - TestCaseSupplier.TypedData.multiRow( - List.of( - new BytesRef(InetAddressPoint.encode(InetAddresses.forString("127.0.0.1"))), - new BytesRef(InetAddressPoint.encode(InetAddresses.forString("::1"))), - new BytesRef(InetAddressPoint.encode(InetAddresses.forString("::"))), - new BytesRef(InetAddressPoint.encode(InetAddresses.forString("ffff::"))) - ), - DataType.IP, - "field" - ) - ), - "Min[field=Attribute[channel=0]]", - DataType.IP, - equalTo(new BytesRef(InetAddressPoint.encode(InetAddresses.forString("::")))) - ) - ), - // Folding new TestCaseSupplier( List.of(DataType.INTEGER), @@ -180,7 +113,11 @@ public static Iterable parameters() { ) ); - return parameterSuppliersFromTypedDataWithDefaultChecks(suppliers); + return parameterSuppliersFromTypedDataWithDefaultChecks( + suppliers, + false, + (v, p) -> "boolean, datetime, ip or numeric except unsigned_long or counter types" + ); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/ValuesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/ValuesTests.java index 23b70b94d0d7f..55320543d0ec3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/ValuesTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/ValuesTests.java @@ -53,7 +53,11 @@ public static Iterable parameters() { MultiRowTestCaseSupplier.stringCases(1, 20, DataType.TEXT) ).flatMap(List::stream).map(ValuesTests::makeSupplier).collect(Collectors.toCollection(() -> suppliers)); - return parameterSuppliersFromTypedDataWithDefaultChecks(suppliers); + return parameterSuppliersFromTypedDataWithDefaultChecks( + suppliers, + false, + (v, p) -> "any type except unsigned_long and spatial types" + ); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/WeightedAvgTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/WeightedAvgTests.java index 2ba091437f237..2c2ffc97f268c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/WeightedAvgTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/WeightedAvgTests.java @@ -52,11 
+52,11 @@ public static Iterable parameters() { List.of( // Folding new TestCaseSupplier( - List.of(DataType.INTEGER), + List.of(DataType.INTEGER, DataType.INTEGER), () -> new TestCaseSupplier.TestCase( List.of( - TestCaseSupplier.TypedData.multiRow(List.of(5), DataType.INTEGER, "field"), - TestCaseSupplier.TypedData.multiRow(List.of(100), DataType.INTEGER, "field") + TestCaseSupplier.TypedData.multiRow(List.of(5), DataType.INTEGER, "number"), + TestCaseSupplier.TypedData.multiRow(List.of(100), DataType.INTEGER, "weight") ), "WeightedAvg[number=Attribute[channel=0],weight=Attribute[channel=1]]", DataType.DOUBLE, @@ -64,11 +64,11 @@ public static Iterable parameters() { ) ), new TestCaseSupplier( - List.of(DataType.LONG), + List.of(DataType.LONG, DataType.INTEGER), () -> new TestCaseSupplier.TestCase( List.of( - TestCaseSupplier.TypedData.multiRow(List.of(5L), DataType.LONG, "field"), - TestCaseSupplier.TypedData.multiRow(List.of(100), DataType.INTEGER, "field") + TestCaseSupplier.TypedData.multiRow(List.of(5L), DataType.LONG, "number"), + TestCaseSupplier.TypedData.multiRow(List.of(100), DataType.INTEGER, "weight") ), "WeightedAvg[number=Attribute[channel=0],weight=Attribute[channel=1]]", DataType.DOUBLE, @@ -76,11 +76,11 @@ public static Iterable parameters() { ) ), new TestCaseSupplier( - List.of(DataType.DOUBLE), + List.of(DataType.DOUBLE, DataType.INTEGER), () -> new TestCaseSupplier.TestCase( List.of( - TestCaseSupplier.TypedData.multiRow(List.of(5.), DataType.DOUBLE, "field"), - TestCaseSupplier.TypedData.multiRow(List.of(100), DataType.INTEGER, "field") + TestCaseSupplier.TypedData.multiRow(List.of(5.), DataType.DOUBLE, "number"), + TestCaseSupplier.TypedData.multiRow(List.of(100), DataType.INTEGER, "weight") ), "WeightedAvg[number=Attribute[channel=0],weight=Attribute[channel=1]]", DataType.DOUBLE, From 4034615e29eaeacab855b3f7eb223ecfa060737e Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Thu, 1 Aug 2024 13:37:17 +0100 Subject: [PATCH 04/36] [DOCS] Clarify copy_to behavior with strict dynamic mappings (#111408) * [DOCS] Clarify copy_to behavior with strict dynamic mappings * Add id * De-verbosify * Delete pesky comma * More info about root and nest * Fixes per review, clarify non-recursive explanation * Skip tests for illustrative example * Fix example syntax * Fix typo --- .../reference/mapping/params/copy-to.asciidoc | 102 ++++++++++++++++-- 1 file changed, 95 insertions(+), 7 deletions(-) diff --git a/docs/reference/mapping/params/copy-to.asciidoc b/docs/reference/mapping/params/copy-to.asciidoc index 10eebfb027736..b26ceac349a3e 100644 --- a/docs/reference/mapping/params/copy-to.asciidoc +++ b/docs/reference/mapping/params/copy-to.asciidoc @@ -64,16 +64,104 @@ Some important points: * It is the field _value_ which is copied, not the terms (which result from the analysis process). * The original <> field will not be modified to show the copied values. * The same value can be copied to multiple fields, with `"copy_to": [ "field_1", "field_2" ]` -* You cannot copy recursively via intermediary fields such as a `copy_to` on -`field_1` to `field_2` and `copy_to` on `field_2` to `field_3` expecting -indexing into `field_1` will eventuate in `field_3`, instead use copy_to -directly to multiple fields from the originating field. +* You cannot copy recursively using intermediary fields. 
+The following configuration will not copy data from `field_1` to `field_3`: ++ +[source,console] +---- +PUT bad_example_index +{ + "mappings": { + "properties": { + "field_1": { + "type": "text", + "copy_to": "field_2" + }, + "field_2": { + "type": "text", + "copy_to": "field_3" + }, + "field_3": { + "type": "text" + } + } + } +} +---- +Instead, copy to multiple fields from the source field: ++ +[source,console] +---- +PUT good_example_index +{ + "mappings": { + "properties": { + "field_1": { + "type": "text", + "copy_to": ["field_2", "field_3"] + }, + "field_2": { + "type": "text" + }, + "field_3": { + "type": "text" + } + } + } +} +---- + +NOTE: `copy_to` is not supported for field types where values take the form of objects, e.g. `date_range`. + +[float] +[[copy-to-dynamic-mapping]] +==== Dynamic mapping + +Consider the following points when using `copy_to` with dynamic mappings: + * If the target field does not exist in the index mappings, the usual <> behavior applies. By default, with <> set to `true`, a non-existent target field will be -dynamically added to the index mappings. If `dynamic` is set to `false`, the +dynamically added to the index mappings. +* If `dynamic` is set to `false`, the target field will not be added to the index mappings, and the value will not be -copied. If `dynamic` is set to `strict`, copying to a non-existent field will +copied. +* If `dynamic` is set to `strict`, copying to a non-existent field will result in an error. ++ +** If the target field is nested, then `copy_to` fields must specify the full path to the nested field. +Omitting the full path will lead to a `strict_dynamic_mapping_exception`. +Use `"copy_to": ["parent_field.child_field"]` to correctly target a nested field. ++ +For example: ++ +[source,console] +-------------------------------------------------- +PUT /test_index +{ + "mappings": { + "dynamic": "strict", + "properties": { + "description": { + "properties": { + "notes": { + "type": "text", + "copy_to": [ "description.notes_raw"], <1> + "analyzer": "standard", + "search_analyzer": "standard" + }, + "notes_raw": { + "type": "keyword" + } + } + } + } + } +} +-------------------------------------------------- -NOTE: `copy_to` is _not_ supported for field types where values take the form of objects, e.g. `date_range` \ No newline at end of file +<1> The `notes` field is copied to the `notes_raw` field. Targeting `notes_raw` alone instead of `description.notes_raw` +would lead to a `strict_dynamic_mapping_exception`. ++ +In this example, `notes_raw` is not defined at the root of the mapping, but under the `description` field. +Without the fully qualified path, {es} would interpret the `copy_to` target as a root-level field, not as a nested field under `description`. 
\ No newline at end of file From 1329dc333d0e1aa8bdd64f41f5d81a7ba1c79fd8 Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Thu, 1 Aug 2024 14:33:33 +0100 Subject: [PATCH 05/36] Add release version to setCompatibleVersions task (#111489) The release version is used to determine if it actually needs to update the CCS version or not --- .../internal/release/ReleaseToolsPlugin.java | 2 +- .../release/SetCompatibleVersionsTask.java | 19 ++++++++++++++++++- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java index 08abb02ea831e..ec79fe20492e1 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java @@ -52,7 +52,7 @@ public void apply(Project project) { project.getTasks().register("extractCurrentVersions", ExtractCurrentVersionsTask.class); project.getTasks().register("tagVersions", TagVersionsTask.class); - project.getTasks().register("setCompatibleVersions", SetCompatibleVersionsTask.class); + project.getTasks().register("setCompatibleVersions", SetCompatibleVersionsTask.class, t -> t.setThisVersion(version)); final FileTree yamlFiles = projectDirectory.dir("docs/changelog") .getAsFileTree() diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/SetCompatibleVersionsTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/SetCompatibleVersionsTask.java index 15e0a0cc345d5..17761e5183b31 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/SetCompatibleVersionsTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/SetCompatibleVersionsTask.java @@ -14,6 +14,7 @@ import com.github.javaparser.ast.expr.NameExpr; import com.github.javaparser.printer.lexicalpreservation.LexicalPreservingPrinter; +import org.elasticsearch.gradle.Version; import org.gradle.api.tasks.TaskAction; import org.gradle.api.tasks.options.Option; import org.gradle.initialization.layout.BuildLayout; @@ -28,6 +29,8 @@ public class SetCompatibleVersionsTask extends AbstractVersionsTask { + private Version thisVersion; + private Version releaseVersion; private Map versionIds = Map.of(); @Inject @@ -35,21 +38,35 @@ public SetCompatibleVersionsTask(BuildLayout layout) { super(layout); } + public void setThisVersion(Version version) { + thisVersion = version; + } + @Option(option = "version-id", description = "Version id used for the release. 
Of the form :.") public void versionIds(List version) { this.versionIds = splitVersionIds(version); } + @Option(option = "release", description = "The version being released") + public void releaseVersion(String version) { + releaseVersion = Version.fromString(version); + } + @TaskAction public void executeTask() throws IOException { if (versionIds.isEmpty()) { throw new IllegalArgumentException("No version ids specified"); } + + if (releaseVersion.getMajor() < thisVersion.getMajor()) { + // don't need to update CCS version - this is for a different major + return; + } + Integer transportVersion = versionIds.get(TRANSPORT_VERSION_TYPE); if (transportVersion == null) { throw new IllegalArgumentException("TransportVersion id not specified"); } - Path versionJava = rootDir.resolve(TRANSPORT_VERSIONS_FILE_PATH); CompilationUnit file = LexicalPreservingPrinter.setup(StaticJavaParser.parse(versionJava)); From 96a04fc1e5b74e046e1caaecc53e36f86e47fad5 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 1 Aug 2024 09:36:17 -0400 Subject: [PATCH 06/36] ESQL: Make test result order consistent (#111510) This test for our new `MV_PSERIES_WEIGHTED_SUM` function was failing sometimes because it was asserting results returned in order but hadn't forced the result to come back in that order. --- .../src/main/resources/mv_pseries_weighted_sum.csv-spec | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mv_pseries_weighted_sum.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mv_pseries_weighted_sum.csv-spec index 4d8ffd1136908..df8771ad7a832 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mv_pseries_weighted_sum.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mv_pseries_weighted_sum.csv-spec @@ -70,7 +70,8 @@ FROM alerts TOP(kibana.alert.risk_score, 10000, "desc"), 1.5 ) BY host.name | EVAL normalized_score = ROUND(100 * score / 261.2, 2) -| KEEP host.name, normalized_score, score; +| KEEP host.name, normalized_score, score +| SORT normalized_score DESC; host.name:keyword|normalized_score:double|score:double test-host-1 |36.16 |94.45465156212452 From 63c2ddea115c2ca9e55ecce692b6ec07c1bd675a Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 1 Aug 2024 06:53:37 -0700 Subject: [PATCH 07/36] Fix DocValuesCodecDuelTests (#111503) We should not access any docValues attribute of the current document if advance() is exhausted or if advanceExact() returns false. 
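As a rough sketch, the guarded read pattern this fix applies looks like the
following (the iterator variable is illustrative, not the test's own naming):

    // docValueCount()/nextValue() are only defined for a document the
    // iterator is actually positioned on, so gate every read on the
    // result of advanceExact() (or on advance() != NO_MORE_DOCS).
    if (values.advanceExact(docId)) {
        for (int i = 0; i < values.docValueCount(); i++) {
            long value = values.nextValue();
        }
    }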
Closes #111470 --- .../codec/tsdb/DocValuesCodecDuelTests.java | 32 ++++++++++++------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/index/codec/tsdb/DocValuesCodecDuelTests.java b/server/src/test/java/org/elasticsearch/index/codec/tsdb/DocValuesCodecDuelTests.java index e8949dda78f7f..9b58e785131c9 100644 --- a/server/src/test/java/org/elasticsearch/index/codec/tsdb/DocValuesCodecDuelTests.java +++ b/server/src/test/java/org/elasticsearch/index/codec/tsdb/DocValuesCodecDuelTests.java @@ -232,6 +232,9 @@ private void assertSortedSetDocValues(LeafReader baselineReader, LeafReader cont for (int i = 0; i < docIdsToAdvanceTo.length; i++) { int docId = docIdsToAdvanceTo[i]; int baselineTarget = assertAdvance(docId, baselineReader, contenderReader, baseline, contender); + if (baselineTarget == NO_MORE_DOCS) { + break; + } assertEquals(baseline.docValueCount(), contender.docValueCount()); for (int j = 0; j < baseline.docValueCount(); j++) { long baselineOrd = baseline.nextOrd(); @@ -255,12 +258,14 @@ private void assertSortedSetDocValues(LeafReader baselineReader, LeafReader cont boolean contenderFound = contender.advanceExact(docId); assertEquals(baselineFound, contenderFound); assertEquals(baseline.docID(), contender.docID()); - assertEquals(baseline.docValueCount(), contender.docValueCount()); - for (int i = 0; i < baseline.docValueCount(); i++) { - long baselineOrd = baseline.nextOrd(); - long contenderOrd = contender.nextOrd(); - assertEquals(baselineOrd, contenderOrd); - assertEquals(baseline.lookupOrd(baselineOrd), contender.lookupOrd(contenderOrd)); + if (baselineFound) { + assertEquals(baseline.docValueCount(), contender.docValueCount()); + for (int i = 0; i < baseline.docValueCount(); i++) { + long baselineOrd = baseline.nextOrd(); + long contenderOrd = contender.nextOrd(); + assertEquals(baselineOrd, contenderOrd); + assertEquals(baseline.lookupOrd(baselineOrd), contender.lookupOrd(contenderOrd)); + } } } } @@ -328,6 +333,9 @@ private void assertSortedNumericDocValues(LeafReader baselineReader, LeafReader for (int i = 0; i < docIdsToAdvanceTo.length; i++) { int docId = docIdsToAdvanceTo[i]; int baselineTarget = assertAdvance(docId, baselineReader, contenderReader, baseline, contender); + if (baselineTarget == NO_MORE_DOCS) { + break; + } assertEquals(baseline.docValueCount(), contender.docValueCount()); for (int j = 0; j < baseline.docValueCount(); j++) { long baselineValue = baseline.nextValue(); @@ -349,11 +357,13 @@ private void assertSortedNumericDocValues(LeafReader baselineReader, LeafReader boolean contenderResult = contender.advanceExact(docId); assertEquals(baselineResult, contenderResult); assertEquals(baseline.docID(), contender.docID()); - assertEquals(baseline.docValueCount(), contender.docValueCount()); - for (int i = 0; i < baseline.docValueCount(); i++) { - long baselineValue = baseline.nextValue(); - long contenderValue = contender.nextValue(); - assertEquals(baselineValue, contenderValue); + if (baselineResult) { + assertEquals(baseline.docValueCount(), contender.docValueCount()); + for (int i = 0; i < baseline.docValueCount(); i++) { + long baselineValue = baseline.nextValue(); + long contenderValue = contender.nextValue(); + assertEquals(baselineValue, contenderValue); + } } } } From 4d67ac1ef5d95814dd526c7751ca025615cc5651 Mon Sep 17 00:00:00 2001 From: Pat Whelan Date: Thu, 1 Aug 2024 10:08:04 -0400 Subject: [PATCH 08/36] [ML] Start a new trace before loading trained model (#111364) Each distinct task is a 
different span in APM tracing, so trained model deployments need a new trace context. --- .../TransportLoadTrainedModelPackage.java | 46 +++++++++++-------- 1 file changed, 28 insertions(+), 18 deletions(-) diff --git a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackage.java b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackage.java index cdc3205f4197c..c4c2c17fcbc12 100644 --- a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackage.java +++ b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackage.java @@ -180,27 +180,37 @@ static void importModel( } private ModelDownloadTask createDownloadTask(Request request) { - return (ModelDownloadTask) taskManager.register(MODEL_IMPORT_TASK_TYPE, MODEL_IMPORT_TASK_ACTION, new TaskAwareRequest() { - @Override - public void setParentTask(TaskId taskId) { - request.setParentTask(taskId); - } + // Loading the model is done by a separate task, so needs a new trace context + try (var ignored = threadPool.getThreadContext().newTraceContext()) { + return (ModelDownloadTask) taskManager.register(MODEL_IMPORT_TASK_TYPE, MODEL_IMPORT_TASK_ACTION, new TaskAwareRequest() { + @Override + public void setParentTask(TaskId taskId) { + request.setParentTask(taskId); + } - @Override - public void setRequestId(long requestId) { - request.setRequestId(requestId); - } + @Override + public void setRequestId(long requestId) { + request.setRequestId(requestId); + } - @Override - public TaskId getParentTask() { - return request.getParentTask(); - } + @Override + public TaskId getParentTask() { + return request.getParentTask(); + } - @Override - public ModelDownloadTask createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { - return new ModelDownloadTask(id, type, action, downloadModelTaskDescription(request.getModelId()), parentTaskId, headers); - } - }, false); + @Override + public ModelDownloadTask createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new ModelDownloadTask( + id, + type, + action, + downloadModelTaskDescription(request.getModelId()), + parentTaskId, + headers + ); + } + }, false); + } } private static void recordError( From 46f941c9f637ddf174653c68d5c609cfc857ac75 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine Date: Thu, 1 Aug 2024 14:25:01 +0000 Subject: [PATCH 09/36] Bump versions after 7.17.23 release --- .buildkite/pipelines/intake.yml | 2 +- .buildkite/pipelines/periodic-packaging.yml | 6 +++--- .buildkite/pipelines/periodic.yml | 10 +++++----- .ci/bwcVersions | 2 +- .ci/snapshotBwcVersions | 2 +- server/src/main/java/org/elasticsearch/Version.java | 1 + .../resources/org/elasticsearch/TransportVersions.csv | 1 + .../org/elasticsearch/index/IndexVersions.csv | 1 + 8 files changed, 14 insertions(+), 11 deletions(-) diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index 776b1ab944f69..e323a9238ca5b 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -62,7 +62,7 @@ steps: timeout_in_minutes: 300 matrix: setup: - BWC_VERSION: ["7.17.23", "8.14.4", "8.15.0", "8.16.0"] + BWC_VERSION: ["7.17.24", "8.14.4", "8.15.0", "8.16.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git 
a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml index e9c743885d78d..6e86e46f79484 100644 --- a/.buildkite/pipelines/periodic-packaging.yml +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -322,8 +322,8 @@ steps: env: BWC_VERSION: 7.16.3 - - label: "{{matrix.image}} / 7.17.23 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.23 + - label: "{{matrix.image}} / 7.17.24 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.24 timeout_in_minutes: 300 matrix: setup: @@ -337,7 +337,7 @@ steps: buildDirectory: /dev/shm/bk diskSizeGb: 250 env: - BWC_VERSION: 7.17.23 + BWC_VERSION: 7.17.24 - label: "{{matrix.image}} / 8.0.1 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.0.1 diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index f908b946bb523..a0bc07f7ca3b7 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -342,8 +342,8 @@ steps: - signal_reason: agent_stop limit: 3 - - label: 7.17.23 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.23#bwcTest + - label: 7.17.24 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.24#bwcTest timeout_in_minutes: 300 agents: provider: gcp @@ -353,7 +353,7 @@ steps: preemptible: true diskSizeGb: 250 env: - BWC_VERSION: 7.17.23 + BWC_VERSION: 7.17.24 retry: automatic: - exit_status: "-1" @@ -771,7 +771,7 @@ steps: setup: ES_RUNTIME_JAVA: - openjdk17 - BWC_VERSION: ["7.17.23", "8.14.4", "8.15.0", "8.16.0"] + BWC_VERSION: ["7.17.24", "8.14.4", "8.15.0", "8.16.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 @@ -821,7 +821,7 @@ steps: - openjdk21 - openjdk22 - openjdk23 - BWC_VERSION: ["7.17.23", "8.14.4", "8.15.0", "8.16.0"] + BWC_VERSION: ["7.17.24", "8.14.4", "8.15.0", "8.16.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 776be80e0d291..d6072488ae93b 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -16,7 +16,7 @@ BWC_VERSION: - "7.14.2" - "7.15.2" - "7.16.3" - - "7.17.23" + - "7.17.24" - "8.0.1" - "8.1.3" - "8.2.3" diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index f5f7f7a7d4ecb..909960a67cc41 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,5 +1,5 @@ BWC_VERSION: - - "7.17.23" + - "7.17.24" - "8.14.4" - "8.15.0" - "8.16.0" diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index fefe2ea486485..fd29a81cdb143 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -123,6 +123,7 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_7_17_21 = new Version(7_17_21_99); public static final Version V_7_17_22 = new Version(7_17_22_99); public static final Version V_7_17_23 = new Version(7_17_23_99); + public static final Version V_7_17_24 = new Version(7_17_24_99); public static final Version V_8_0_0 = new Version(8_00_00_99); public static final Version V_8_0_1 = new Version(8_00_01_99); diff --git a/server/src/main/resources/org/elasticsearch/TransportVersions.csv b/server/src/main/resources/org/elasticsearch/TransportVersions.csv index 7d2697539fa13..687e435990785 
100644 --- a/server/src/main/resources/org/elasticsearch/TransportVersions.csv +++ b/server/src/main/resources/org/elasticsearch/TransportVersions.csv @@ -70,6 +70,7 @@ 7.17.20,7172099 7.17.21,7172199 7.17.22,7172299 +7.17.23,7172399 8.0.0,8000099 8.0.1,8000199 8.1.0,8010099 diff --git a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv index f177ab1468cb2..8c86ca48d6284 100644 --- a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv +++ b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv @@ -70,6 +70,7 @@ 7.17.20,7172099 7.17.21,7172199 7.17.22,7172299 +7.17.23,7172399 8.0.0,8000099 8.0.1,8000199 8.1.0,8010099 From 7f715a4cbeb6e899b7784dd5cff91aad3bd0b66a Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Thu, 1 Aug 2024 16:53:03 +0200 Subject: [PATCH 10/36] Only use at most a single thread for search context freeing (#111156) Forking to `GENERIC` makes sense here since we sporadically block for a macroscopic amount of time to protect transport threads, but in almost all cases the operation operates on the same data structures and is very fast. Since it's also very frequent, we shouldn't be creating a bunch of generic threads during a burst -> lets throttle to a single thread. --- .../action/search/SearchTransportService.java | 37 +++++++++++++++++-- 1 file changed, 34 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index fb3c49d83cb93..52d4542faaf77 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -24,9 +24,12 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.util.concurrent.ThrottledTaskRunner; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Releasable; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.dfs.DfsSearchResult; @@ -60,6 +63,7 @@ import java.util.HashMap; import java.util.Map; import java.util.Objects; +import java.util.concurrent.Executor; import java.util.function.BiFunction; import static org.elasticsearch.action.search.SearchTransportAPMMetrics.ACTION_ATTRIBUTE_NAME; @@ -455,9 +459,10 @@ public static void registerRequestHandler( boolean freed = searchService.freeReaderContext(request.id()); channel.sendResponse(SearchFreeContextResponse.of(freed)); }; + final Executor freeContextExecutor = buildFreeContextExecutor(transportService); transportService.registerRequestHandler( FREE_CONTEXT_SCROLL_ACTION_NAME, - transportService.getThreadPool().generic(), + freeContextExecutor, ScrollFreeContextRequest::new, instrumentedHandler(FREE_CONTEXT_SCROLL_ACTION_METRIC, transportService, searchTransportMetrics, freeContextHandler) ); @@ -470,7 +475,7 @@ public static void registerRequestHandler( transportService.registerRequestHandler( FREE_CONTEXT_ACTION_NAME, - transportService.getThreadPool().generic(), + freeContextExecutor, 
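+ // freeContextExecutor (built by buildFreeContextExecutor below) wraps a
+ // ThrottledTaskRunner that runs at most one free-context task at a time
+ // on the GENERIC pool, so a burst of requests cannot fan out across many
+ // generic threads.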
SearchFreeContextRequest::new, instrumentedHandler(FREE_CONTEXT_ACTION_METRIC, transportService, searchTransportMetrics, freeContextHandler) ); @@ -478,7 +483,7 @@ public static void registerRequestHandler( transportService.registerRequestHandler( CLEAR_SCROLL_CONTEXTS_ACTION_NAME, - transportService.getThreadPool().generic(), + freeContextExecutor, ClearScrollContextsRequest::new, instrumentedHandler(CLEAR_SCROLL_CONTEXTS_ACTION_METRIC, transportService, searchTransportMetrics, (request, channel, task) -> { searchService.freeAllScrollContexts(); @@ -626,6 +631,32 @@ public static void registerRequestHandler( TransportActionProxy.registerProxyAction(transportService, QUERY_CAN_MATCH_NODE_NAME, true, CanMatchNodeResponse::new); } + private static Executor buildFreeContextExecutor(TransportService transportService) { + final ThrottledTaskRunner throttledTaskRunner = new ThrottledTaskRunner( + "free_context", + 1, + transportService.getThreadPool().generic() + ); + return r -> throttledTaskRunner.enqueueTask(new ActionListener<>() { + @Override + public void onResponse(Releasable releasable) { + try (releasable) { + r.run(); + } + } + + @Override + public void onFailure(Exception e) { + if (r instanceof AbstractRunnable abstractRunnable) { + abstractRunnable.onFailure(e); + } + // should be impossible, GENERIC pool doesn't reject anything + logger.error("unexpected failure running " + r, e); + assert false : new AssertionError("unexpected failure running " + r, e); + } + }); + } + private static TransportRequestHandler instrumentedHandler( String actionQualifier, TransportService transportService, From 028b35129c836ed2a0e94bf07bfd3d8907e1d595 Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 1 Aug 2024 17:41:31 +0100 Subject: [PATCH 11/36] Remove unused compatibility shims (#111509) These methods are now unused by any dependent project so can be removed. Relates #107984 --- .../restore/RestoreSnapshotRequest.java | 5 -- .../DeleteStoredScriptRequestBuilder.java | 29 ----------- .../GetStoredScriptRequestBuilder.java | 29 ----------- .../PutStoredScriptRequestBuilder.java | 44 ---------------- .../client/internal/ClusterAdminClient.java | 50 ------------------- 5 files changed, 157 deletions(-) delete mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java delete mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptRequestBuilder.java delete mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index f0d47813dad77..f9ee2d84f8732 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -68,11 +68,6 @@ public RestoreSnapshotRequest(TimeValue masterNodeTimeout) { super(masterNodeTimeout); } - @Deprecated(forRemoval = true) // temporary compatibility shim - public RestoreSnapshotRequest(String repository, String snapshot) { - this(MasterNodeRequest.TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, repository, snapshot); - } - /** * Constructs a new put repository request with the provided repository and snapshot names. 
* diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java deleted file mode 100644 index 375365c174885..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.cluster.storedscripts; - -import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; -import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.internal.ElasticsearchClient; -import org.elasticsearch.core.TimeValue; - -public class DeleteStoredScriptRequestBuilder extends AcknowledgedRequestBuilder< - DeleteStoredScriptRequest, - AcknowledgedResponse, - DeleteStoredScriptRequestBuilder> { - - public DeleteStoredScriptRequestBuilder(ElasticsearchClient client, TimeValue masterNodeTimeout, TimeValue ackTimeout) { - super(client, TransportDeleteStoredScriptAction.TYPE, new DeleteStoredScriptRequest(masterNodeTimeout, ackTimeout)); - } - - public DeleteStoredScriptRequestBuilder setId(String id) { - request.id(id); - return this; - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptRequestBuilder.java deleted file mode 100644 index 798d78928c860..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptRequestBuilder.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.admin.cluster.storedscripts; - -import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; -import org.elasticsearch.client.internal.ElasticsearchClient; -import org.elasticsearch.core.TimeValue; - -public class GetStoredScriptRequestBuilder extends MasterNodeReadOperationRequestBuilder< - GetStoredScriptRequest, - GetStoredScriptResponse, - GetStoredScriptRequestBuilder> { - - public GetStoredScriptRequestBuilder(ElasticsearchClient client, TimeValue masterNodeTimeout) { - super(client, GetStoredScriptAction.INSTANCE, new GetStoredScriptRequest(masterNodeTimeout)); - } - - public GetStoredScriptRequestBuilder setId(String id) { - request.id(id); - return this; - } - -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java deleted file mode 100644 index e096fa24e6837..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.cluster.storedscripts; - -import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; -import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.internal.ElasticsearchClient; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.xcontent.XContentType; - -public class PutStoredScriptRequestBuilder extends AcknowledgedRequestBuilder< - PutStoredScriptRequest, - AcknowledgedResponse, - PutStoredScriptRequestBuilder> { - - public PutStoredScriptRequestBuilder(ElasticsearchClient client, TimeValue masterNodeTimeout, TimeValue ackTimeout) { - super(client, TransportPutStoredScriptAction.TYPE, new PutStoredScriptRequest(masterNodeTimeout, ackTimeout)); - } - - public PutStoredScriptRequestBuilder setId(String id) { - request.id(id); - return this; - } - - public PutStoredScriptRequestBuilder setContext(String context) { - request.context(context); - return this; - } - - /** - * Set the source of the script along with the content type of the source - */ - public PutStoredScriptRequestBuilder setContent(BytesReference source, XContentType xContentType) { - request.content(source, xContentType); - return this; - } -} diff --git a/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java b/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java index 995fe99cadffb..4d5a670925b5b 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java @@ -94,16 +94,6 @@ import org.elasticsearch.action.admin.cluster.stats.ClusterStatsRequestBuilder; import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse; import org.elasticsearch.action.admin.cluster.stats.TransportClusterStatsAction; -import 
org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; -import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequestBuilder; -import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptAction; -import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; -import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequestBuilder; -import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptResponse; -import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequest; -import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequestBuilder; -import org.elasticsearch.action.admin.cluster.storedscripts.TransportDeleteStoredScriptAction; -import org.elasticsearch.action.admin.cluster.storedscripts.TransportPutStoredScriptAction; import org.elasticsearch.action.ingest.DeletePipelineRequest; import org.elasticsearch.action.ingest.DeletePipelineRequestBuilder; import org.elasticsearch.action.ingest.DeletePipelineTransportAction; @@ -118,9 +108,7 @@ import org.elasticsearch.action.ingest.SimulatePipelineRequest; import org.elasticsearch.action.ingest.SimulatePipelineRequestBuilder; import org.elasticsearch.action.ingest.SimulatePipelineResponse; -import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.core.TimeValue; import org.elasticsearch.tasks.TaskId; @@ -425,42 +413,4 @@ public ActionFuture simulatePipeline(SimulatePipelineR public SimulatePipelineRequestBuilder prepareSimulatePipeline(BytesReference source, XContentType xContentType) { return new SimulatePipelineRequestBuilder(this, source, xContentType); } - - @Deprecated(forRemoval = true) // temporary compatibility shim - public PutStoredScriptRequestBuilder preparePutStoredScript() { - return new PutStoredScriptRequestBuilder( - this, - MasterNodeRequest.TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, - AcknowledgedRequest.DEFAULT_ACK_TIMEOUT - ); - } - - @Deprecated(forRemoval = true) // temporary compatibility shim - public void deleteStoredScript(DeleteStoredScriptRequest request, ActionListener listener) { - execute(TransportDeleteStoredScriptAction.TYPE, request, listener); - } - - @Deprecated(forRemoval = true) // temporary compatibility shim - public DeleteStoredScriptRequestBuilder prepareDeleteStoredScript(String id) { - return new DeleteStoredScriptRequestBuilder( - client, - MasterNodeRequest.TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, - AcknowledgedRequest.DEFAULT_ACK_TIMEOUT - ).setId(id); - } - - @Deprecated(forRemoval = true) // temporary compatibility shim - public void putStoredScript(final PutStoredScriptRequest request, ActionListener listener) { - execute(TransportPutStoredScriptAction.TYPE, request, listener); - } - - @Deprecated(forRemoval = true) // temporary compatibility shim - public GetStoredScriptRequestBuilder prepareGetStoredScript(String id) { - return new GetStoredScriptRequestBuilder(this, MasterNodeRequest.TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT).setId(id); - } - - @Deprecated(forRemoval = true) // temporary compatibility shim - public void getStoredScript(final GetStoredScriptRequest request, final ActionListener listener) { - execute(GetStoredScriptAction.INSTANCE, request, listener); - } } From 
ea692d1348dcdbda177a96ef15c0d0ddf80012c1 Mon Sep 17 00:00:00 2001
From: Nik Everett
Date: Thu, 1 Aug 2024 13:54:28 -0400
Subject: [PATCH 12/36] ESQL: Don't mutate the BoolQueryBuilder in plan
 (#111519)

This modifies ESQL's `QueryBuilder` merging to stop it from mutating
`BoolQueryBuilder`s in place. It's more efficient when you can do that,
but only marginally so. Instead we create a shallow copy of the same
builder and mutate *that*. That lines up much better with the plan being
immutable objects. It should be!

The resulting queries that ESQL sends to Lucene are the same here - we
just modify how we build them.

This should stop a fun class of bugs that can come up when we mutate the
query builders in multiple threads - because we *do* replan the query in
multiple threads. That's fine. So long as we shallow copy, like we do in
this PR.
---
 docs/changelog/111519.yaml | 5 +
 .../index/query/BoolQueryBuilder.java | 24 +++
 .../index/query/BoolQueryBuilderTests.java | 37 ++++
 .../xpack/esql/core/util/Queries.java | 2 +-
 .../xpack/esql/core/util/QueriesTests.java | 34 +++-
 .../esql/qa/server/multi-node/build.gradle | 3 +
 .../xpack/esql/qa/multi_node/Clusters.java | 4 +-
 .../xpack/esql/qa/multi_node/EsqlSpecIT.java | 2 +-
 .../esql/qa/multi_node/FieldExtractorIT.java | 2 +-
 .../xpack/esql/qa/multi_node/RestEsqlIT.java | 38 +++++
 .../esql/qa/multi_node/EsqlClientYamlIT.java | 2 +-
 .../esql/qa/server/single-node/build.gradle | 6 +-
 .../xpack/esql/qa/single_node/RestEsqlIT.java | 85 +---------
 .../xpack/esql/qa/rest/RestEsqlTestCase.java | 158 ++++++++++++++++++
 14 files changed, 302 insertions(+), 100 deletions(-)
 create mode 100644 docs/changelog/111519.yaml
 create mode 100644 x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/RestEsqlIT.java

diff --git a/docs/changelog/111519.yaml b/docs/changelog/111519.yaml
new file mode 100644
index 0000000000000..8cc62fb8ed903
--- /dev/null
+++ b/docs/changelog/111519.yaml
@@ -0,0 +1,5 @@
+pr: 111519
+summary: "ESQL: Don't mutate the `BoolQueryBuilder` in plan"
+area: ES|QL
+type: bug
+issues: []
diff --git a/server/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java
index 4b4727bca4198..9856bab64ec6a 100644
--- a/server/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java
@@ -410,4 +410,28 @@ private static boolean rewriteClauses(
 public TransportVersion getMinimalSupportedVersion() {
 return TransportVersions.ZERO;
 }
+
+ /**
+ * Create a new builder with the same clauses but modification of
+ * the builder won't modify the original. Modifications of any
+ * of the copy's clauses will modify the original. Don't do that.
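+ * For example, {@code copy.filter(q)} adds {@code q} to the copy only,
+ * while mutating an inner clause that both builders share is visible
+ * through both.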
+ */ + public BoolQueryBuilder shallowCopy() { + BoolQueryBuilder copy = new BoolQueryBuilder(); + copy.adjustPureNegative = adjustPureNegative; + copy.minimumShouldMatch = minimumShouldMatch; + for (QueryBuilder q : mustClauses) { + copy.must(q); + } + for (QueryBuilder q : mustNotClauses) { + copy.mustNot(q); + } + for (QueryBuilder q : filterClauses) { + copy.filter(q); + } + for (QueryBuilder q : shouldClauses) { + copy.should(q); + } + return copy; + } } diff --git a/server/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java index c29957f04c515..a4cc96a2063dc 100644 --- a/server/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java @@ -32,7 +32,9 @@ import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.hasItem; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.Matchers.not; public class BoolQueryBuilderTests extends AbstractQueryTestCase { @Override @@ -463,4 +465,39 @@ public void testMustRewrite() throws IOException { IllegalStateException e = expectThrows(IllegalStateException.class, () -> boolQuery.toQuery(context)); assertEquals("Rewrite first", e.getMessage()); } + + public void testShallowCopy() { + BoolQueryBuilder orig = createTestQueryBuilder(); + BoolQueryBuilder shallowCopy = orig.shallowCopy(); + assertThat(shallowCopy.adjustPureNegative(), equalTo(orig.adjustPureNegative())); + assertThat(shallowCopy.minimumShouldMatch(), equalTo(orig.minimumShouldMatch())); + assertThat(shallowCopy.must(), equalTo(orig.must())); + assertThat(shallowCopy.mustNot(), equalTo(orig.mustNot())); + assertThat(shallowCopy.should(), equalTo(orig.should())); + assertThat(shallowCopy.filter(), equalTo(orig.filter())); + + QueryBuilder b = new MatchQueryBuilder("foo", "bar"); + switch (between(0, 3)) { + case 0 -> { + shallowCopy.must(b); + assertThat(shallowCopy.must(), hasItem(b)); + assertThat(orig.must(), not(hasItem(b))); + } + case 1 -> { + shallowCopy.mustNot(b); + assertThat(shallowCopy.mustNot(), hasItem(b)); + assertThat(orig.mustNot(), not(hasItem(b))); + } + case 2 -> { + shallowCopy.should(b); + assertThat(shallowCopy.should(), hasItem(b)); + assertThat(orig.should(), not(hasItem(b))); + } + case 3 -> { + shallowCopy.filter(b); + assertThat(shallowCopy.filter(), hasItem(b)); + assertThat(orig.filter(), not(hasItem(b))); + } + } + } } diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/Queries.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/Queries.java index 9403c3c6a0bc0..759d7b80acc22 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/Queries.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/Queries.java @@ -50,7 +50,7 @@ public static QueryBuilder combine(Clause clause, List queries) { if (firstQuery == null) { firstQuery = query; if (firstQuery instanceof BoolQueryBuilder bqb) { - bool = bqb; + bool = bqb.shallowCopy(); } } // at least two entries, start copying diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/util/QueriesTests.java 
b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/util/QueriesTests.java index c5f4eb2ba8283..8dde968640c21 100644 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/util/QueriesTests.java +++ b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/util/QueriesTests.java @@ -14,6 +14,7 @@ import static java.util.Arrays.asList; import static org.hamcrest.Matchers.everyItem; +import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.in; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.sameInstance; @@ -92,13 +93,27 @@ public void testCombineBoolQueries() { assertThat(combination, instanceOf(BoolQueryBuilder.class)); var bool = (BoolQueryBuilder) combination; + assertBoolQueryMerge(queries, bool, clause); + } - var clauseList = clause.innerQueries.apply(bool); + private void assertBoolQueryMerge(QueryBuilder[] queries, BoolQueryBuilder bool, Queries.Clause clause) { + BoolQueryBuilder first = (BoolQueryBuilder) queries[0]; + for (QueryBuilder b : first.must()) { + assertThat(bool.must(), hasItem(b)); + } + for (QueryBuilder b : first.mustNot()) { + assertThat(bool.mustNot(), hasItem(b)); + } + for (QueryBuilder b : first.should()) { + assertThat(bool.should(), hasItem(b)); + } + for (QueryBuilder b : first.filter()) { + assertThat(bool.filter(), hasItem(b)); + } - for (QueryBuilder query : queries) { - if (query != bool) { - assertThat(query, in(clauseList)); - } + var clauseList = clause.innerQueries.apply(bool); + for (int i = 1; i < queries.length; i++) { + assertThat(queries[i], in(clauseList)); } } @@ -118,10 +133,11 @@ public void testCombineMixedBoolAndNonBoolQueries() { assertThat(combination, instanceOf(BoolQueryBuilder.class)); var bool = (BoolQueryBuilder) combination; - var clauseList = clause.innerQueries.apply(bool); - - for (QueryBuilder query : queries) { - if (query != bool) { + if (queries[0] instanceof BoolQueryBuilder) { + assertBoolQueryMerge(queries, bool, clause); + } else { + var clauseList = clause.innerQueries.apply(bool); + for (QueryBuilder query : queries) { assertThat(query, in(clauseList)); } } diff --git a/x-pack/plugin/esql/qa/server/multi-node/build.gradle b/x-pack/plugin/esql/qa/server/multi-node/build.gradle index 6bba58b721a94..9f8ca78aba81e 100644 --- a/x-pack/plugin/esql/qa/server/multi-node/build.gradle +++ b/x-pack/plugin/esql/qa/server/multi-node/build.gradle @@ -8,6 +8,9 @@ dependencies { javaRestTestImplementation project(xpackModule('esql:qa:testFixtures')) javaRestTestImplementation project(xpackModule('esql:qa:server')) yamlRestTestImplementation project(xpackModule('esql:qa:server')) + + clusterPlugins project(':plugins:mapper-size') + clusterPlugins project(':plugins:mapper-murmur3') } GradleUtils.extendSourceSet(project, "javaRestTest", "yamlRestTest") diff --git a/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/Clusters.java b/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/Clusters.java index 4aa17801fa217..3a68aee9fd205 100644 --- a/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/Clusters.java +++ b/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/Clusters.java @@ -8,15 +8,17 @@ package org.elasticsearch.xpack.esql.qa.multi_node; import org.elasticsearch.test.cluster.ElasticsearchCluster; 
+import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider; import org.elasticsearch.test.cluster.local.distribution.DistributionType; public class Clusters { - public static ElasticsearchCluster testCluster() { + public static ElasticsearchCluster testCluster(LocalClusterConfigProvider configProvider) { return ElasticsearchCluster.local() .distribution(DistributionType.DEFAULT) .nodes(2) .setting("xpack.security.enabled", "false") .setting("xpack.license.self_generated.type", "trial") + .apply(() -> configProvider) .build(); } } diff --git a/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlSpecIT.java b/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlSpecIT.java index 93385ec9efd89..3a0c400de1795 100644 --- a/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlSpecIT.java +++ b/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlSpecIT.java @@ -14,7 +14,7 @@ public class EsqlSpecIT extends EsqlSpecTestCase { @ClassRule - public static ElasticsearchCluster cluster = Clusters.testCluster(); + public static ElasticsearchCluster cluster = Clusters.testCluster(spec -> {}); @Override protected String getTestRestCluster() { diff --git a/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/FieldExtractorIT.java b/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/FieldExtractorIT.java index bcb83a31f7641..9ec454db2d325 100644 --- a/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/FieldExtractorIT.java +++ b/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/FieldExtractorIT.java @@ -17,7 +17,7 @@ @ThreadLeakFilters(filters = TestClustersThreadFilter.class) public class FieldExtractorIT extends FieldExtractorTestCase { @ClassRule - public static ElasticsearchCluster cluster = Clusters.testCluster(); + public static ElasticsearchCluster cluster = Clusters.testCluster(spec -> {}); @Override protected String getTestRestCluster() { diff --git a/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/RestEsqlIT.java b/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/RestEsqlIT.java new file mode 100644 index 0000000000000..7e98d486d6c2e --- /dev/null +++ b/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/RestEsqlIT.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.qa.multi_node; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.xpack.esql.qa.rest.RestEsqlTestCase; +import org.junit.ClassRule; + +import java.util.Arrays; +import java.util.List; + +public class RestEsqlIT extends RestEsqlTestCase { + @ClassRule + public static ElasticsearchCluster cluster = Clusters.testCluster( + specBuilder -> specBuilder.plugin("mapper-size").plugin("mapper-murmur3") + ); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + @ParametersFactory(argumentFormatting = "%1s") + public static List modes() { + return Arrays.stream(Mode.values()).map(m -> new Object[] { m }).toList(); + } + + public RestEsqlIT(Mode mode) { + super(mode); + } +} diff --git a/x-pack/plugin/esql/qa/server/multi-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlClientYamlIT.java b/x-pack/plugin/esql/qa/server/multi-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlClientYamlIT.java index d3ddae16e8af1..62e83c9280087 100644 --- a/x-pack/plugin/esql/qa/server/multi-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlClientYamlIT.java +++ b/x-pack/plugin/esql/qa/server/multi-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlClientYamlIT.java @@ -19,7 +19,7 @@ public class EsqlClientYamlIT extends ESClientYamlSuiteTestCase { @ClassRule - public static ElasticsearchCluster cluster = Clusters.testCluster(); + public static ElasticsearchCluster cluster = Clusters.testCluster(spec -> {}); @Override protected String getTestRestCluster() { diff --git a/x-pack/plugin/esql/qa/server/single-node/build.gradle b/x-pack/plugin/esql/qa/server/single-node/build.gradle index 865d7cf5f5e6c..ab8e3d4b32d9a 100644 --- a/x-pack/plugin/esql/qa/server/single-node/build.gradle +++ b/x-pack/plugin/esql/qa/server/single-node/build.gradle @@ -20,10 +20,8 @@ dependencies { javaRestTestImplementation("org.slf4j:slf4j-nop:${versions.slf4j}") javaRestTestImplementation('org.apache.arrow:arrow-memory-unsafe:16.1.0') - dependencies { - clusterPlugins project(':plugins:mapper-size') - clusterPlugins project(':plugins:mapper-murmur3') - } + clusterPlugins project(':plugins:mapper-size') + clusterPlugins project(':plugins:mapper-murmur3') } restResources { diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java index 797fc803ed531..d679ee18d0a73 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java @@ -13,7 +13,6 @@ import org.apache.lucene.search.DocIdSetIterator; import org.elasticsearch.Build; import org.elasticsearch.client.Request; -import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.settings.Settings; @@ -22,25 +21,20 @@ import org.elasticsearch.test.TestClustersThreadFilter; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.LogType; -import 
org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.esql.qa.rest.RestEsqlTestCase; import org.hamcrest.Matchers; -import org.junit.Assert; import org.junit.ClassRule; import java.io.IOException; import java.io.InputStream; -import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.List; -import java.util.Locale; import java.util.Map; import static org.elasticsearch.test.ListMatcher.matchesList; import static org.elasticsearch.test.MapMatcher.assertMap; import static org.elasticsearch.test.MapMatcher.matchesMap; -import static org.hamcrest.Matchers.any; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -72,7 +66,7 @@ public RestEsqlIT(Mode mode) { } public void testBasicEsql() throws IOException { - indexTestData(); + indexTimestampData(1); RequestObjectBuilder builder = requestObjectBuilder().query(fromIndex() + " | stats avg(value)"); if (Build.current().isSnapshot()) { @@ -274,50 +268,8 @@ public void testTableDuplicateNames() throws IOException { assertThat(re.getMessage(), containsString("[6:10] Duplicate field 'a'")); } - /** - * INLINESTATS can group on {@code NOW()}. It's a little silly, but - * doing something like {@code DATE_TRUNC(1 YEAR, NOW() - 1970-01-01T00:00:00Z)} is - * much more sensible. But just grouping on {@code NOW()} is enough to test this. - *
-     * This works because {@code NOW()} locks it's value at the start of the entire
-     * query. It's part of the "configuration" of the query.
-     *
- */ - public void testInlineStatsNow() throws IOException { - assumeTrue("INLINESTATS only available on snapshots", Build.current().isSnapshot()); - indexTestData(); - - RequestObjectBuilder builder = requestObjectBuilder().query( - fromIndex() + " | EVAL now=NOW() | INLINESTATS AVG(value) BY now | SORT value ASC" - ); - Map result = runEsql(builder); - ListMatcher values = matchesList(); - for (int i = 0; i < 1000; i++) { - values = values.item( - matchesList().item("2020-12-12T00:00:00.000Z") - .item("value" + i) - .item("value" + i) - .item(i) - .item(any(String.class)) - .item(499.5) - ); - } - assertMap( - result, - matchesMap().entry( - "columns", - matchesList().item(matchesMap().entry("name", "@timestamp").entry("type", "date")) - .item(matchesMap().entry("name", "test").entry("type", "text")) - .item(matchesMap().entry("name", "test.keyword").entry("type", "keyword")) - .item(matchesMap().entry("name", "value").entry("type", "long")) - .item(matchesMap().entry("name", "now").entry("type", "date")) - .item(matchesMap().entry("name", "AVG(value)").entry("type", "double")) - ).entry("values", values) - ); - } - public void testProfile() throws IOException { - indexTestData(); + indexTimestampData(1); RequestObjectBuilder builder = requestObjectBuilder().query(fromIndex() + " | STATS AVG(value)"); builder.profile(true); @@ -371,7 +323,7 @@ public void testProfile() throws IOException { public void testInlineStatsProfile() throws IOException { assumeTrue("INLINESTATS only available on snapshots", Build.current().isSnapshot()); - indexTestData(); + indexTimestampData(1); RequestObjectBuilder builder = requestObjectBuilder().query(fromIndex() + " | INLINESTATS AVG(value) | SORT value ASC"); builder.profile(true); @@ -486,37 +438,6 @@ private MapMatcher basicProfile() { return matchesMap().entry("pages_processed", greaterThan(0)).entry("process_nanos", greaterThan(0)); } - private void indexTestData() throws IOException { - Request createIndex = new Request("PUT", testIndexName()); - createIndex.setJsonEntity(""" - { - "settings": { - "index": { - "number_of_shards": 1 - } - } - }"""); - Response response = client().performRequest(createIndex); - assertThat( - entityToMap(response.getEntity(), XContentType.JSON), - matchesMap().entry("shards_acknowledged", true).entry("index", testIndexName()).entry("acknowledged", true) - ); - - StringBuilder b = new StringBuilder(); - for (int i = 0; i < 1000; i++) { - b.append(String.format(Locale.ROOT, """ - {"create":{"_index":"%s"}} - {"@timestamp":"2020-12-12","test":"value%s","value":%d} - """, testIndexName(), i, i)); - } - Request bulk = new Request("POST", "/_bulk"); - bulk.addParameter("refresh", "true"); - bulk.addParameter("filter_path", "errors"); - bulk.setJsonEntity(b.toString()); - response = client().performRequest(bulk); - Assert.assertEquals("{\"errors\":false}", EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8)); - } - private void assertException(String query, String... 
errorMessages) throws IOException { ResponseException re = expectThrows(ResponseException.class, () -> runEsqlSync(requestObjectBuilder().query(query))); assertThat(re.getResponse().getStatusLine().getStatusCode(), equalTo(400)); diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java index 82b7459066586..81e82a8d60b77 100644 --- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java @@ -11,6 +11,7 @@ import org.apache.http.entity.ContentType; import org.apache.http.nio.entity.NByteArrayEntity; import org.apache.http.util.EntityUtils; +import org.elasticsearch.Build; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; @@ -20,16 +21,19 @@ import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; +import org.elasticsearch.test.ListMatcher; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.esql.EsqlTestUtils; import org.junit.After; +import org.junit.Assert; import org.junit.Before; import java.io.ByteArrayOutputStream; @@ -52,11 +56,13 @@ import static java.util.Collections.emptySet; import static org.elasticsearch.common.logging.LoggerMessageFormat.format; +import static org.elasticsearch.test.ListMatcher.matchesList; import static org.elasticsearch.test.MapMatcher.assertMap; import static org.elasticsearch.test.MapMatcher.matchesMap; import static org.elasticsearch.xpack.esql.EsqlTestUtils.as; import static org.elasticsearch.xpack.esql.qa.rest.RestEsqlTestCase.Mode.ASYNC; import static org.elasticsearch.xpack.esql.qa.rest.RestEsqlTestCase.Mode.SYNC; +import static org.hamcrest.Matchers.any; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.emptyOrNullString; @@ -121,6 +127,8 @@ public static class RequestObjectBuilder { private Boolean profile = null; + private CheckedConsumer filter; + public RequestObjectBuilder() throws IOException { this(randomFrom(XContentType.values())); } @@ -187,6 +195,11 @@ public RequestObjectBuilder profile(boolean profile) { return this; } + public RequestObjectBuilder filter(CheckedConsumer filter) { + this.filter = filter; + return this; + } + public RequestObjectBuilder build() throws IOException { if (isBuilt == false) { if (tables != null) { @@ -205,6 +218,11 @@ public RequestObjectBuilder build() throws IOException { if (profile != null) { builder.field("profile", profile); } + if (filter != null) { + builder.startObject("filter"); + filter.accept(builder); + builder.endObject(); + } builder.endObject(); isBuilt = true; } @@ -594,6 +612,115 @@ public void testComplexFieldNames() throws IOException { assertThat(e.getMessage(), containsString("The field names are too complex to process")); } + /** + * 
INLINESTATS can group on {@code NOW()}. It's a little silly, but
+     * doing something like {@code DATE_TRUNC(1 YEAR, NOW() - 1970-01-01T00:00:00Z)} is
+     * much more sensible. But just grouping on {@code NOW()} is enough to test this.
+     *
+     * This works because {@code NOW()} locks its value at the start of the entire
+     * query. It's part of the "configuration" of the query.
+     *
+ */ + public void testInlineStatsNow() throws IOException { + assumeTrue("INLINESTATS only available on snapshots", Build.current().isSnapshot()); + indexTimestampData(1); + + RequestObjectBuilder builder = requestObjectBuilder().query( + fromIndex() + " | EVAL now=NOW() | INLINESTATS AVG(value) BY now | SORT value ASC" + ); + Map result = runEsql(builder); + ListMatcher values = matchesList(); + for (int i = 0; i < 1000; i++) { + values = values.item( + matchesList().item("2020-12-12T00:00:00.000Z") + .item("value" + i) + .item("value" + i) + .item(i) + .item(any(String.class)) + .item(499.5) + ); + } + assertMap( + result, + matchesMap().entry( + "columns", + matchesList().item(matchesMap().entry("name", "@timestamp").entry("type", "date")) + .item(matchesMap().entry("name", "test").entry("type", "text")) + .item(matchesMap().entry("name", "test.keyword").entry("type", "keyword")) + .item(matchesMap().entry("name", "value").entry("type", "long")) + .item(matchesMap().entry("name", "now").entry("type", "date")) + .item(matchesMap().entry("name", "AVG(value)").entry("type", "double")) + ).entry("values", values) + ); + } + + public void testTopLevelFilter() throws IOException { + indexTimestampData(3); // Multiple shards has caused a bug in the past with the merging case below + + RequestObjectBuilder builder = requestObjectBuilder().filter(b -> { + b.startObject("range"); + { + b.startObject("@timestamp").field("gte", "2020-12-12").endObject(); + } + b.endObject(); + }).query(fromIndex() + " | STATS SUM(value)"); + assertMap( + runEsql(builder), + matchesMap().entry("columns", matchesList().item(matchesMap().entry("name", "SUM(value)").entry("type", "long"))) + .entry("values", List.of(List.of(499500))) + ); + } + + public void testTopLevelFilterMerged() throws IOException { + indexTimestampData(3); // Multiple shards has caused a bug in the past with the merging case below + + RequestObjectBuilder builder = requestObjectBuilder().filter(b -> { + b.startObject("range"); + { + b.startObject("@timestamp").field("gte", "2020-12-12").endObject(); + } + b.endObject(); + }).query(fromIndex() + " | WHERE value == 12 | STATS SUM(value)"); + assertMap( + runEsql(builder), + matchesMap().entry("columns", matchesList().item(matchesMap().entry("name", "SUM(value)").entry("type", "long"))) + .entry("values", List.of(List.of(12))) + ); + } + + public void testTopLevelFilterBoolMerged() throws IOException { + indexTimestampData(3); // Multiple shards has caused a bug in the past + + for (int i = 0; i < 100; i++) { + // Run the query many times so we're more likely to bump into any sort of modification problems + RequestObjectBuilder builder = requestObjectBuilder().filter(b -> { + b.startObject("bool"); + { + b.startArray("filter"); + { + b.startObject().startObject("range"); + { + b.startObject("@timestamp").field("gte", "2020-12-12").endObject(); + } + b.endObject().endObject(); + b.startObject().startObject("match"); + { + b.field("test", "value12"); + } + b.endObject().endObject(); + } + b.endArray(); + } + b.endObject(); + }).query(fromIndex() + " | WHERE @timestamp > \"2010-01-01\" | STATS SUM(value)"); + assertMap( + runEsql(builder), + matchesMap().entry("columns", matchesList().item(matchesMap().entry("name", "SUM(value)").entry("type", "long"))) + .entry("values", List.of(List.of(12))) + ); + } + } + private static String queryWithComplexFieldNames(int field) { StringBuilder query = new StringBuilder(); query.append(" | keep ").append(randomAlphaOfLength(10)).append(1); @@ -1009,4 +1136,35 @@ 
protected boolean preserveClusterUponCompletion() { public void assertRequestBreakerEmpty() throws Exception { EsqlSpecTestCase.assertRequestBreakerEmpty(); } + + protected void indexTimestampData(int shards) throws IOException { + Request createIndex = new Request("PUT", testIndexName()); + createIndex.setJsonEntity(""" + { + "settings": { + "index": { + "number_of_shards": %shards% + } + } + }""".replace("%shards%", Integer.toString(shards))); + Response response = client().performRequest(createIndex); + assertThat( + entityToMap(response.getEntity(), XContentType.JSON), + matchesMap().entry("shards_acknowledged", true).entry("index", testIndexName()).entry("acknowledged", true) + ); + + StringBuilder b = new StringBuilder(); + for (int i = 0; i < 1000; i++) { + b.append(String.format(Locale.ROOT, """ + {"create":{"_index":"%s"}} + {"@timestamp":"2020-12-12","test":"value%s","value":%d} + """, testIndexName(), i, i)); + } + Request bulk = new Request("POST", "/_bulk"); + bulk.addParameter("refresh", "true"); + bulk.addParameter("filter_path", "errors"); + bulk.setJsonEntity(b.toString()); + response = client().performRequest(bulk); + Assert.assertEquals("{\"errors\":false}", EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8)); + } } From 9df3a3d186d39d7b431057939f780b79fcea69d7 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Thu, 1 Aug 2024 15:57:54 -0500 Subject: [PATCH 13/36] Truncating watcher history if it is too large (#111245) --- docs/changelog/111245.yaml | 6 + .../settings/notification-settings.asciidoc | 5 + .../core/watcher/history/WatchRecord.java | 160 +++++++++++++++++- .../integration/HistoryIntegrationTests.java | 69 +++++++- .../elasticsearch/xpack/watcher/Watcher.java | 2 +- .../xpack/watcher/history/HistoryStore.java | 21 ++- .../watcher/history/HistoryStoreTests.java | 108 +++++++++++- 7 files changed, 364 insertions(+), 7 deletions(-) create mode 100644 docs/changelog/111245.yaml diff --git a/docs/changelog/111245.yaml b/docs/changelog/111245.yaml new file mode 100644 index 0000000000000..384373d52cb20 --- /dev/null +++ b/docs/changelog/111245.yaml @@ -0,0 +1,6 @@ +pr: 111245 +summary: Truncating watcher history if it is too large +area: Watcher +type: bug +issues: + - 94745 diff --git a/docs/reference/settings/notification-settings.asciidoc b/docs/reference/settings/notification-settings.asciidoc index 4a48c26974084..145112ef4d27c 100644 --- a/docs/reference/settings/notification-settings.asciidoc +++ b/docs/reference/settings/notification-settings.asciidoc @@ -42,6 +42,11 @@ Specifies the path to a file that contains a key for encrypting sensitive data. If `xpack.watcher.encrypt_sensitive_data` is set to `true`, this setting is required. For more information, see <>. +`xpack.watcher.max.history.record.size`:: +(<>) +The maximum size watcher history record that can be written into the watcher history index. Any larger history record will have some of +its larger fields removed. Defaults to 10mb. + `xpack.http.proxy.host`:: (<>) Specifies the address of the proxy server to use to connect to HTTP services. 
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/history/WatchRecord.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/history/WatchRecord.java index 64215c0b1acf1..f6e8a80884d66 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/history/WatchRecord.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/history/WatchRecord.java @@ -9,6 +9,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.Strings; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; @@ -21,8 +22,10 @@ import org.elasticsearch.xpack.core.watcher.execution.Wid; import org.elasticsearch.xpack.core.watcher.input.ExecutableInput; import org.elasticsearch.xpack.core.watcher.input.Input; +import org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils; import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams; import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; +import org.elasticsearch.xpack.core.watcher.watch.Payload; import org.elasticsearch.xpack.core.watcher.watch.Watch; import org.elasticsearch.xpack.core.watcher.watch.WatchField; @@ -45,13 +48,16 @@ public abstract class WatchRecord implements ToXContentObject { private static final ParseField EXECUTION_RESULT = new ParseField("result"); private static final ParseField EXCEPTION = new ParseField("exception"); private static final ParseField USER = new ParseField("user"); + public static final String TRUNCATED_RECORD_KEY = "truncated"; + public static final String TRUNCATED_RECORD_VALUE = "Watch history record exceeded the value of the " + + "`xpack.watcher.max.history.record.size' setting"; protected final Wid id; protected final Watch watch; private final String nodeId; protected final TriggerEvent triggerEvent; protected final ExecutionState state; - private final String user; + protected final String user; // only emitted to xcontent in "debug" mode protected final Map vars; @@ -254,6 +260,8 @@ public String toString() { return id.toString(); } + public abstract WatchRecord dropLargeFields() throws Exception; + public static class MessageWatchRecord extends WatchRecord { @Nullable private final String[] messages; @@ -299,6 +307,24 @@ public MessageWatchRecord(WatchRecord record, ExecutionState state, String messa } } + private MessageWatchRecord( + Wid id, + TriggerEvent triggerEvent, + ExecutionState state, + Map vars, + ExecutableInput redactedInput, + ExecutableCondition condition, + Map metadata, + Watch watch, + WatchExecutionResult redactedResult, + String nodeId, + String user, + String[] messages + ) { + super(id, triggerEvent, state, vars, redactedInput, condition, metadata, watch, redactedResult, nodeId, user); + this.messages = messages; + } + public String[] messages() { return messages; } @@ -309,10 +335,46 @@ void innerToXContent(XContentBuilder builder, Params params) throws IOException builder.array(MESSAGES.getPreferredName(), messages); } } + + @Override + public WatchRecord dropLargeFields() throws Exception { + return new MessageWatchRecord( + this.id, + this.triggerEvent, + this.state, + this.vars, + this.input == null ? null : getTruncatedInput(), + this.condition, + this.metadata, + this.watch, + this.executionResult == null ? 
null : getTruncatedWatchExecutionResult(this), + this.getNodeId(), + this.user, + this.messages + ); + } } public static class ExceptionWatchRecord extends WatchRecord { + private ExceptionWatchRecord( + Wid id, + TriggerEvent triggerEvent, + ExecutionState state, + Map vars, + ExecutableInput redactedInput, + ExecutableCondition condition, + Map metadata, + Watch watch, + WatchExecutionResult redactedResult, + String nodeId, + String user, + Exception exception + ) { + super(id, triggerEvent, state, vars, redactedInput, condition, metadata, watch, redactedResult, nodeId, user); + this.exception = exception; + } + private static final Map STACK_TRACE_ENABLED_PARAMS = Map.of( ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE, "false" @@ -356,5 +418,101 @@ void innerToXContent(XContentBuilder builder, Params params) throws IOException } } } + + @Override + public WatchRecord dropLargeFields() throws Exception { + return new ExceptionWatchRecord( + this.id, + triggerEvent, + this.state, + this.vars, + this.input == null ? null : getTruncatedInput(), + this.condition, + this.metadata, + this.watch, + this.executionResult == null ? null : getTruncatedWatchExecutionResult(this), + this.getNodeId(), + this.user, + this.exception + ); + } + } + + /* + * This returns a ExecutableInput whose toXContent() returns no information other than a new TRUNCATED_MESSAGE field. It + * drops other information to avoid having a document that is too large to index into Elasticsearch. + */ + private static ExecutableInput getTruncatedInput() { + return new ExecutableInput<>(new Input() { + @Override + public String type() { + return TRUNCATED_RECORD_KEY; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.value(TRUNCATED_RECORD_VALUE); + return builder; + } + }) { + @Override + public Input.Result execute(WatchExecutionContext ctx, Payload payload) { + throw new UnsupportedOperationException("Redacted input cannot be executed"); + } + }; + } + + /* + * This returns a WatchExecutionResult whose toXContent() returns minimal information, including a new TRUNCATED_MESSAGE field. It + * drops most other information to avoid having a document that is too large to index into Elasticsearch. 
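+     * Only the execution time, the execution duration, and the {@code truncated}
+     * marker field are emitted, so a reader can tell the record was truncated
+     * rather than empty.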
+ */ + private static WatchExecutionResult getTruncatedWatchExecutionResult(WatchRecord watchRecord) { + WatchExecutionContext watchExecutionContext = new WatchExecutionContext( + watchRecord.id.watchId(), + watchRecord.executionResult.executionTime(), + null, + TimeValue.ZERO + ) { + @Override + public boolean knownWatch() { + return false; + } + + @Override + public boolean simulateAction(String actionId) { + return false; + } + + @Override + public boolean skipThrottling(String actionId) { + return false; + } + + @Override + public boolean shouldBeExecuted() { + return false; + } + + @Override + public boolean recordExecution() { + return false; + } + }; + + return new WatchExecutionResult(watchExecutionContext, watchRecord.executionResult.executionDurationMs()) { + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + WatcherDateTimeUtils.writeDate( + Field.EXECUTION_TIME.getPreferredName(), + builder, + watchRecord.executionResult.executionTime() + ); + builder.field(Field.EXECUTION_DURATION.getPreferredName(), watchRecord.executionResult.executionDurationMs()); + builder.field(TRUNCATED_RECORD_KEY, TRUNCATED_RECORD_VALUE); + builder.endObject(); + return builder; + } + }; } } diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/HistoryIntegrationTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/HistoryIntegrationTests.java index 19cd37400a01c..ee645e4f32798 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/HistoryIntegrationTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/HistoryIntegrationTests.java @@ -8,6 +8,7 @@ import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.sort.SortBuilders; @@ -17,6 +18,7 @@ import org.elasticsearch.xpack.core.watcher.actions.ActionStatus; import org.elasticsearch.xpack.core.watcher.client.WatchSourceBuilder; import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField; +import org.elasticsearch.xpack.core.watcher.history.WatchRecord; import org.elasticsearch.xpack.core.watcher.input.Input; import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchRequestBuilder; @@ -29,6 +31,7 @@ import org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule; import java.util.Locale; +import java.util.Map; import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; @@ -44,6 +47,7 @@ import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.templateRequest; import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; @@ -225,6 +229,69 @@ public void testThatHistoryContainsStatus() throws Exception { }); } + public void 
testThatHistoryIsTruncated() throws Exception { + { + /* + * The input for this watch is 1 MB, smaller than the 10 MB default of HistoryStore's MAX_HISTORY_SIZE_SETTING. So we do not + * expect its history record to be truncated. + */ + new PutWatchRequestBuilder(client()).setId("test_watch_small") + .setSource( + watchBuilder().trigger(schedule(interval(5, IntervalSchedule.Interval.Unit.HOURS))) + .input(simpleInput("foo", randomAlphaOfLength((int) ByteSizeValue.ofMb(1).getBytes()))) + .addAction("_logger", loggingAction("#### randomLogging")) + ) + .get(); + new ExecuteWatchRequestBuilder(client()).setId("test_watch_small").setRecordExecution(true).get(); + assertBusy(() -> { + assertResponse(getWatchHistory(), searchResponse -> { + assertHitCount(searchResponse, 1); + SearchHit hit = searchResponse.getHits().getAt(0); + XContentSource source = new XContentSource(hit.getSourceRef(), XContentType.JSON); + Map input = source.getValue("input"); + assertThat(input.containsKey(WatchRecord.TRUNCATED_RECORD_KEY), equalTo(false)); + assertThat(input.containsKey("simple"), equalTo(true)); + Map result = source.getValue("result"); + assertThat(result.containsKey(WatchRecord.TRUNCATED_RECORD_KEY), equalTo(false)); + assertThat(result.containsKey("input"), equalTo(true)); + assertThat(result.containsKey("actions"), equalTo(true)); + assertThat(result.containsKey("condition"), equalTo(true)); + }); + }); + } + { + /* + * The input for this watch is 20 MB, much bigger than the 10 MB default of HistoryStore's MAX_HISTORY_SIZE_SETTING. So we + * expect to see its history record truncated before being stored. + */ + new PutWatchRequestBuilder(client()).setId("test_watch_large") + .setSource( + watchBuilder().trigger(schedule(interval(5, IntervalSchedule.Interval.Unit.HOURS))) + .input(simpleInput("foo", randomAlphaOfLength((int) ByteSizeValue.ofMb(20).getBytes()))) + .addAction("_logger", loggingAction("#### randomLogging")) + ) + .get(); + new ExecuteWatchRequestBuilder(client()).setId("test_watch_large").setRecordExecution(true).get(); + assertBusy(() -> { + assertResponse(getWatchHistory(), searchResponse -> { + assertHitCount(searchResponse, 2); + SearchHit hit = searchResponse.getHits().getAt(1); + XContentSource source = new XContentSource(hit.getSourceRef(), XContentType.JSON); + Map input = source.getValue("input"); + assertThat(input.containsKey(WatchRecord.TRUNCATED_RECORD_KEY), equalTo(true)); + assertThat(input.get(WatchRecord.TRUNCATED_RECORD_KEY), equalTo(WatchRecord.TRUNCATED_RECORD_VALUE)); + assertThat(input.containsKey("simple"), equalTo(false)); + Map result = source.getValue("result"); + assertThat(result.containsKey(WatchRecord.TRUNCATED_RECORD_KEY), equalTo(true)); + assertThat(result.get(WatchRecord.TRUNCATED_RECORD_KEY), equalTo(WatchRecord.TRUNCATED_RECORD_VALUE)); + assertThat(result.containsKey("input"), equalTo(false)); + assertThat(result.containsKey("actions"), equalTo(false)); + assertThat(result.containsKey("condition"), equalTo(false)); + }); + }); + } + } + /* * Returns a SearchRequestBuilder containing up to the default number of watch history records (10) if the .watcher-history* is ready. * Otherwise it throws an AssertionError. 
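+     */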
@@ -232,7 +299,7 @@ public void testThatHistoryContainsStatus() throws Exception { private SearchRequestBuilder getWatchHistory() { ensureGreen(HistoryStoreField.DATA_STREAM); flushAndRefresh(".watcher-history-*"); - return prepareSearch(".watcher-history-*"); + return prepareSearch(".watcher-history-*").addSort("@timestamp", SortOrder.ASC); } } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java index 2d71aef08ea13..821c92b514667 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java @@ -490,7 +490,7 @@ public void afterBulk(long executionId, BulkRequest request, Exception failure) .setBulkSize(SETTING_BULK_SIZE.get(settings)) .build(); - HistoryStore historyStore = new HistoryStore(bulkProcessor); + HistoryStore historyStore = new HistoryStore(bulkProcessor, settings); // schedulers final Set> scheduleParsers = new HashSet<>(); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/history/HistoryStore.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/history/HistoryStore.java index b4f6d82eab965..d8ba0c7e7a506 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/history/HistoryStore.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/history/HistoryStore.java @@ -13,6 +13,9 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField; @@ -22,16 +25,24 @@ import java.io.IOException; +import static org.elasticsearch.common.settings.Setting.Property.NodeScope; import static org.elasticsearch.xpack.core.watcher.support.Exceptions.ioException; public class HistoryStore { private static final Logger logger = LogManager.getLogger(HistoryStore.class); + public static final Setting MAX_HISTORY_SIZE_SETTING = Setting.byteSizeSetting( + "xpack.watcher.max.history.record.size", + ByteSizeValue.ofMb(10), + NodeScope + ); private final BulkProcessor2 bulkProcessor; + private final ByteSizeValue maxHistoryRecordSize; - public HistoryStore(BulkProcessor2 bulkProcessor) { + public HistoryStore(BulkProcessor2 bulkProcessor, Settings settings) { this.bulkProcessor = bulkProcessor; + maxHistoryRecordSize = MAX_HISTORY_SIZE_SETTING.get(settings); } /** @@ -41,9 +52,15 @@ public HistoryStore(BulkProcessor2 bulkProcessor) { public void put(WatchRecord watchRecord) throws Exception { try (XContentBuilder builder = XContentFactory.jsonBuilder()) { watchRecord.toXContent(builder, WatcherParams.HIDE_SECRETS); - IndexRequest request = new IndexRequest(HistoryStoreField.DATA_STREAM).id(watchRecord.id().value()).source(builder); request.opType(IndexRequest.OpType.CREATE); + if (request.source().length() > maxHistoryRecordSize.getBytes()) { + WatchRecord redactedWatchRecord = watchRecord.dropLargeFields(); + try (XContentBuilder redactedBuilder = XContentFactory.jsonBuilder()) { + redactedWatchRecord.toXContent(redactedBuilder, 
WatcherParams.HIDE_SECRETS); + request.source(redactedBuilder); + } + } bulkProcessor.add(request); } catch (IOException ioe) { throw ioException("failed to persist watch record [{}]", ioe, watchRecord); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryStoreTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryStoreTests.java index 7b2300ed6e892..89968aa2cf19b 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryStoreTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryStoreTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -38,6 +39,7 @@ import org.elasticsearch.xpack.watcher.common.http.HttpResponse; import org.elasticsearch.xpack.watcher.notification.jira.JiraAccount; import org.elasticsearch.xpack.watcher.notification.jira.JiraIssue; +import org.elasticsearch.xpack.watcher.test.WatcherTestUtils; import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTriggerEvent; import org.junit.Before; import org.mockito.ArgumentCaptor; @@ -45,6 +47,7 @@ import java.time.Instant; import java.time.ZoneOffset; import java.time.ZonedDateTime; +import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; import static java.util.Collections.emptyMap; @@ -55,6 +58,7 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.not; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.atLeastOnce; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; @@ -73,9 +77,18 @@ public void init() { when(client.threadPool()).thenReturn(threadPool); when(client.settings()).thenReturn(settings); when(threadPool.getThreadContext()).thenReturn(new ThreadContext(settings)); + historyStore = createHistoryStore(null); + } + + private HistoryStore createHistoryStore(ByteSizeValue maxHistoryRecordSize) { BulkProcessor2.Listener listener = mock(BulkProcessor2.Listener.class); - BulkProcessor2 bulkProcessor = BulkProcessor2.builder(client::bulk, listener, threadPool).setBulkActions(1).build(); - historyStore = new HistoryStore(bulkProcessor); + BulkProcessor2 bulkProcessor = BulkProcessor2.builder(client::bulk, listener, client.threadPool()).setBulkActions(1).build(); + Settings.Builder settingsBuilder = Settings.builder(); + if (maxHistoryRecordSize != null) { + settingsBuilder.put(HistoryStore.MAX_HISTORY_SIZE_SETTING.getKey(), maxHistoryRecordSize); + } + Settings settings = settingsBuilder.build(); + return new HistoryStore(bulkProcessor, settings); } public void testPut() throws Exception { @@ -111,6 +124,97 @@ public void testPut() throws Exception { assertThat(historyItemIndexed.get(), equalTo(true)); } + @SuppressWarnings("unchecked") + public void testPutLargeHistory() throws Exception { + IndexResponse indexResponse = mock(IndexResponse.class); + AtomicBoolean historyRedacted = new AtomicBoolean(false); + doAnswer(invocation -> { + BulkRequest request = (BulkRequest) invocation.getArguments()[0]; + ActionListener listener = (ActionListener) 
invocation.getArguments()[1]; + IndexRequest indexRequest = (IndexRequest) request.requests().get(0); + Map sourceMap = indexRequest.sourceAsMap(); + if (indexRequest.opType() == OpType.CREATE && indexRequest.index().equals(HistoryStoreField.DATA_STREAM)) { + if (sourceMap.containsKey("input") + && ((Map) sourceMap.get("input")).containsKey(WatchRecord.TRUNCATED_RECORD_KEY) + && sourceMap.containsKey("result") + && ((Map) sourceMap.get("result")).containsKey(WatchRecord.TRUNCATED_RECORD_KEY)) { + assertThat( + ((Map) sourceMap.get("input")).get(WatchRecord.TRUNCATED_RECORD_KEY), + equalTo(WatchRecord.TRUNCATED_RECORD_VALUE) + ); + assertThat( + ((Map) sourceMap.get("result")).get(WatchRecord.TRUNCATED_RECORD_KEY), + equalTo(WatchRecord.TRUNCATED_RECORD_VALUE) + ); + historyRedacted.set(true); + } + listener.onResponse( + new BulkResponse(new BulkItemResponse[] { BulkItemResponse.success(1, OpType.CREATE, indexResponse) }, 1) + ); + } else { + listener.onFailure(new ElasticsearchException("test issue")); + fail("Should never get here"); + } + return null; + }).when(client).bulk(any(), any()); + HistoryStore historyStoreSmallLimit = createHistoryStore(ByteSizeValue.ofBytes(10)); + HistoryStore historyStoreLargeLimit = createHistoryStore(ByteSizeValue.ofBytes(10_000_000)); + { + /* + * First, create a history record with input and results. We expect this to not be truncated when the store has a high limit, + * and we expect it to be truncated when we have the artificially low limit. + */ + WatchExecutionContext context = WatcherTestUtils.createWatchExecutionContext(); + WatchExecutionResult result = new WatchExecutionResult(context, randomNonNegativeLong()); + String message = randomAlphaOfLength(100); + WatchRecord watchRecord = new WatchRecord.MessageWatchRecord(context, result, message); + historyStoreLargeLimit.put(watchRecord); + verify(client, atLeastOnce()).bulk(any(), any()); + assertThat(historyRedacted.get(), equalTo(false)); + historyStoreSmallLimit.put(watchRecord); + verify(client, atLeastOnce()).bulk(any(), any()); + assertThat(historyRedacted.get(), equalTo(true)); + } + { + /* + * Now make sure that we don't blow up when the input and result are null + */ + historyRedacted.set(false); + ZonedDateTime now = Instant.ofEpochMilli(0).atZone(ZoneOffset.UTC); + Wid wid = new Wid("_name", now); + ScheduleTriggerEvent event = new ScheduleTriggerEvent(wid.watchId(), now, now); + WatchRecord watchRecord = new WatchRecord.MessageWatchRecord( + wid, + event, + ExecutionState.EXECUTED, + null, + randomAlphaOfLength(10) + ); + historyStoreLargeLimit.put(watchRecord); + verify(client, atLeastOnce()).bulk(any(), any()); + assertThat(historyRedacted.get(), equalTo(false)); + historyStoreSmallLimit.put(watchRecord); + verify(client, atLeastOnce()).bulk(any(), any()); + assertThat(historyRedacted.get(), equalTo(false)); + } + { + /* + * Now make sure that we don't blow up when the input and result are null + */ + historyRedacted.set(false); + WatchExecutionContext context = WatcherTestUtils.createWatchExecutionContext(); + WatchExecutionResult result = new WatchExecutionResult(context, randomNonNegativeLong()); + Exception exception = new RuntimeException(randomAlphaOfLength(100)); + WatchRecord watchRecord = new WatchRecord.ExceptionWatchRecord(context, result, exception); + historyStoreLargeLimit.put(watchRecord); + verify(client, atLeastOnce()).bulk(any(), any()); + assertThat(historyRedacted.get(), equalTo(false)); + historyStoreSmallLimit.put(watchRecord); + verify(client, 
atLeastOnce()).bulk(any(), any()); + assertThat(historyRedacted.get(), equalTo(true)); + } + } + public void testStoreWithHideSecrets() throws Exception { HttpClient httpClient = mock(HttpClient.class); when(httpClient.execute(any(HttpRequest.class))).thenReturn(new HttpResponse(HttpStatus.SC_INTERNAL_SERVER_ERROR)); From 155042d09ffe3c40a139dad8697f4cbf0baa6528 Mon Sep 17 00:00:00 2001 From: Oleksandr Kolomiiets Date: Thu, 1 Aug 2024 14:36:18 -0700 Subject: [PATCH 14/36] Add more leaf fields to logsdb data generator (#111469) --- ...ogsIndexModeRandomDataChallengeRestIT.java | 44 +++++-- test/framework/build.gradle | 3 + .../DataGeneratorSpecification.java | 33 ++++-- .../logsdb/datageneration/FieldType.java | 10 +- .../datageneration/arbitrary/Arbitrary.java | 40 ------- .../arbitrary/RandomBasedArbitrary.java | 83 ------------- .../datageneration/datasource/DataSource.java | 49 ++++++++ .../datasource/DataSourceHandler.java | 71 +++++++++++ .../datasource/DataSourceRequest.java | 109 +++++++++++++++++ .../datasource/DataSourceResponse.java | 55 +++++++++ .../DefaultObjectGenerationHandler.java | 64 ++++++++++ .../DefaultPrimitiveTypesHandler.java | 74 ++++++++++++ .../datasource/DefaultWrappersHandler.java | 42 +++++++ .../logsdb/datageneration/fields/Context.java | 31 ++++- .../datageneration/fields/FieldValues.java | 34 ------ .../GenericSubObjectFieldDataGenerator.java | 33 ++++-- .../fields/leaf/ByteFieldDataGenerator.java | 40 +++++++ .../fields/leaf/DoubleFieldDataGenerator.java | 40 +++++++ .../fields/leaf/FloatFieldDataGenerator.java | 40 +++++++ .../leaf/HalfFloatFieldDataGenerator.java | 40 +++++++ .../leaf/IntegerFieldDataGenerator.java | 40 +++++++ .../leaf/KeywordFieldDataGenerator.java | 14 ++- .../fields/leaf/LongFieldDataGenerator.java | 14 ++- .../leaf/ScaledFloatFieldDataGenerator.java | 44 +++++++ .../fields/leaf/ShortFieldDataGenerator.java | 40 +++++++ .../leaf/UnsignedLongFieldDataGenerator.java | 40 +++++++ .../DataGeneratorSnapshotTests.java | 104 +++++++++------- .../datageneration/DataGeneratorTests.java | 112 +++++++++--------- ....java => DefaultWrappersHandlerTests.java} | 16 +-- 29 files changed, 1047 insertions(+), 312 deletions(-) delete mode 100644 test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/arbitrary/Arbitrary.java delete mode 100644 test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/arbitrary/RandomBasedArbitrary.java create mode 100644 test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DataSource.java create mode 100644 test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DataSourceHandler.java create mode 100644 test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DataSourceRequest.java create mode 100644 test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DataSourceResponse.java create mode 100644 test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DefaultObjectGenerationHandler.java create mode 100644 test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DefaultPrimitiveTypesHandler.java create mode 100644 test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DefaultWrappersHandler.java delete mode 100644 test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/FieldValues.java create mode 100644 
test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/ByteFieldDataGenerator.java create mode 100644 test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/DoubleFieldDataGenerator.java create mode 100644 test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/FloatFieldDataGenerator.java create mode 100644 test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/HalfFloatFieldDataGenerator.java create mode 100644 test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/IntegerFieldDataGenerator.java create mode 100644 test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/ScaledFloatFieldDataGenerator.java create mode 100644 test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/ShortFieldDataGenerator.java create mode 100644 test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/UnsignedLongFieldDataGenerator.java rename test/framework/src/test/java/org/elasticsearch/logsdb/datageneration/{FieldValuesTests.java => DefaultWrappersHandlerTests.java} (69%) diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java index 3c2ee0d7723ed..c1d63b76fc25c 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java @@ -13,7 +13,9 @@ import org.elasticsearch.logsdb.datageneration.DataGenerator; import org.elasticsearch.logsdb.datageneration.DataGeneratorSpecification; import org.elasticsearch.logsdb.datageneration.FieldType; -import org.elasticsearch.logsdb.datageneration.arbitrary.RandomBasedArbitrary; +import org.elasticsearch.logsdb.datageneration.datasource.DataSourceHandler; +import org.elasticsearch.logsdb.datageneration.datasource.DataSourceRequest; +import org.elasticsearch.logsdb.datageneration.datasource.DataSourceResponse; import org.elasticsearch.logsdb.datageneration.fields.PredefinedField; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; @@ -21,16 +23,21 @@ import java.io.IOException; import java.time.Instant; import java.util.List; +import java.util.function.Function; /** * Challenge test (see {@link StandardVersusLogsIndexModeChallengeRestIT}) that uses randomly generated * mapping and documents in order to cover more code paths and permutations. */ public class StandardVersusLogsIndexModeRandomDataChallengeRestIT extends StandardVersusLogsIndexModeChallengeRestIT { + private final boolean fullyDynamicMapping; + private final DataGenerator dataGenerator; public StandardVersusLogsIndexModeRandomDataChallengeRestIT() { super(); + this.fullyDynamicMapping = randomBoolean(); + this.dataGenerator = new DataGenerator( DataGeneratorSpecification.builder() // Nested fields don't work with subobjects: false. 
@@ -39,22 +46,40 @@ public StandardVersusLogsIndexModeRandomDataChallengeRestIT() {
             // Currently matching fails because in synthetic source all fields are flat (given that we have subobjects: false)
             // but stored source is identical to original document which has nested structure.
             .withMaxObjectDepth(0)
-            .withArbitrary(new RandomBasedArbitrary() {
+            .withDataSourceHandlers(List.of(new DataSourceHandler() {
                 // TODO enable null values
                 // Matcher does not handle nulls currently
                 @Override
-                public boolean generateNullValue() {
-                    return false;
+                public DataSourceResponse.NullWrapper handle(DataSourceRequest.NullWrapper request) {
+                    return new DataSourceResponse.NullWrapper(Function.identity());
                 }

                 // TODO enable arrays
                 // List matcher currently does not apply matching logic recursively
                 // and equality check fails because arrays are sorted in synthetic source.
                 @Override
-                public boolean generateArrayOfValues() {
-                    return false;
+                public DataSourceResponse.ArrayWrapper handle(DataSourceRequest.ArrayWrapper request) {
+                    return new DataSourceResponse.ArrayWrapper(Function.identity());
+                }
+
+                // TODO enable scaled_float fields
+                // There is a difference in synthetic source (precision loss)
+                // specific to these fields which the matcher can't handle.
+                @Override
+                public DataSourceResponse.FieldTypeGenerator handle(DataSourceRequest.FieldTypeGenerator request) {
+                    // Unsigned long is not used with dynamic mapping
+                    // since it can initially look like long
+                    // but later fail to parse once big values arrive.
+                    // Double is not used since it maps to float with dynamic mapping
+                    // resulting in precision loss compared to original source.
+                    var excluded = fullyDynamicMapping
+                        ? List.of(FieldType.DOUBLE, FieldType.SCALED_FLOAT, FieldType.UNSIGNED_LONG)
+                        : List.of(FieldType.SCALED_FLOAT);
+                    return new DataSourceResponse.FieldTypeGenerator(
+                        () -> randomValueOtherThanMany(excluded::contains, () -> randomFrom(FieldType.values()))
+                    );
                 }
-            })
+            }))
             .withPredefinedFields(List.of(new PredefinedField("host.name", FieldType.KEYWORD)))
             .build()
         );
@@ -62,7 +87,7 @@ public boolean generateArrayOfValues() {

     @Override
     public void baselineMappings(XContentBuilder builder) throws IOException {
-        if (randomBoolean()) {
+        if (fullyDynamicMapping == false) {
             dataGenerator.writeMapping(builder);
         } else {
             // We want dynamic mapping, but we need host.name to be a keyword instead of text to support aggregations.
@@ -81,10 +106,9 @@ public void baselineMappings(XContentBuilder builder) throws IOException {

     @Override
     public void contenderMappings(XContentBuilder builder) throws IOException {
-        if (randomBoolean()) {
+        if (fullyDynamicMapping == false) {
             dataGenerator.writeMapping(builder, b -> builder.field("subobjects", false));
         } else {
-            // Sometimes we go with full dynamic mapping.
builder.startObject(); builder.field("subobjects", false); builder.endObject(); diff --git a/test/framework/build.gradle b/test/framework/build.gradle index 4d598a00de7b6..c8d4aba10b478 100644 --- a/test/framework/build.gradle +++ b/test/framework/build.gradle @@ -31,6 +31,9 @@ dependencies { api 'org.objenesis:objenesis:3.3' api "org.elasticsearch:mocksocket:${versions.mocksocket}" + + testImplementation project(':x-pack:plugin:mapper-unsigned-long') + testImplementation project(":modules:mapper-extras") } sourceSets { diff --git a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/DataGeneratorSpecification.java b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/DataGeneratorSpecification.java index ea47ad3be1fa6..57bf9f12ccef1 100644 --- a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/DataGeneratorSpecification.java +++ b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/DataGeneratorSpecification.java @@ -8,16 +8,17 @@ package org.elasticsearch.logsdb.datageneration; -import org.elasticsearch.logsdb.datageneration.arbitrary.Arbitrary; -import org.elasticsearch.logsdb.datageneration.arbitrary.RandomBasedArbitrary; +import org.elasticsearch.logsdb.datageneration.datasource.DataSource; +import org.elasticsearch.logsdb.datageneration.datasource.DataSourceHandler; import org.elasticsearch.logsdb.datageneration.fields.PredefinedField; import java.util.ArrayList; +import java.util.Collection; import java.util.List; /** * Allows configuring behavior of {@link DataGenerator}. - * @param arbitrary provides arbitrary values used during generation + * @param dataSource source of generated data * @param maxFieldCountPerLevel maximum number of fields that an individual object in mapping has. * Applies to subobjects. * @param maxObjectDepth maximum depth of nested objects @@ -25,7 +26,7 @@ * @param predefinedFields predefined fields that must be present in mapping and documents. Only top level fields are supported. 
*/ public record DataGeneratorSpecification( - Arbitrary arbitrary, + DataSource dataSource, int maxFieldCountPerLevel, int maxObjectDepth, int nestedFieldsLimit, @@ -41,24 +42,24 @@ public static DataGeneratorSpecification buildDefault() { } public static class Builder { - private Arbitrary arbitrary; + private List dataSourceHandlers; private int maxFieldCountPerLevel; private int maxObjectDepth; private int nestedFieldsLimit; private List predefinedFields; public Builder() { - arbitrary = new RandomBasedArbitrary(); + this.dataSourceHandlers = new ArrayList<>(); // Simply sufficiently big numbers to get some permutations - maxFieldCountPerLevel = 50; - maxObjectDepth = 2; + this.maxFieldCountPerLevel = 50; + this.maxObjectDepth = 2; // Default value of index.mapping.nested_fields.limit - nestedFieldsLimit = 50; - predefinedFields = new ArrayList<>(); + this.nestedFieldsLimit = 50; + this.predefinedFields = new ArrayList<>(); } - public Builder withArbitrary(Arbitrary arbitrary) { - this.arbitrary = arbitrary; + public Builder withDataSourceHandlers(Collection handlers) { + this.dataSourceHandlers.addAll(handlers); return this; } @@ -83,7 +84,13 @@ public Builder withPredefinedFields(List predefinedFields) { } public DataGeneratorSpecification build() { - return new DataGeneratorSpecification(arbitrary, maxFieldCountPerLevel, maxObjectDepth, nestedFieldsLimit, predefinedFields); + return new DataGeneratorSpecification( + new DataSource(dataSourceHandlers), + maxFieldCountPerLevel, + maxObjectDepth, + nestedFieldsLimit, + predefinedFields + ); } } } diff --git a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/FieldType.java b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/FieldType.java index 0a675d85077e4..c8821c087d084 100644 --- a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/FieldType.java +++ b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/FieldType.java @@ -13,5 +13,13 @@ */ public enum FieldType { KEYWORD, - LONG + LONG, + UNSIGNED_LONG, + INTEGER, + SHORT, + BYTE, + DOUBLE, + FLOAT, + HALF_FLOAT, + SCALED_FLOAT } diff --git a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/arbitrary/Arbitrary.java b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/arbitrary/Arbitrary.java deleted file mode 100644 index 7a4bb880c5335..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/arbitrary/Arbitrary.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.logsdb.datageneration.arbitrary; - -import org.elasticsearch.logsdb.datageneration.FieldType; - -/** - * Provides arbitrary values for different purposes. 
- */ -public interface Arbitrary { - boolean generateSubObject(); - - boolean generateNestedObject(); - - int childFieldCount(int lowerBound, int upperBound); - - String fieldName(int lengthLowerBound, int lengthUpperBound); - - FieldType fieldType(); - - long longValue(); - - String stringValue(int lengthLowerBound, int lengthUpperBound); - - boolean generateNullValue(); - - boolean generateArrayOfValues(); - - int valueArraySize(); - - boolean generateArrayOfObjects(); - - int objectArraySize(); -} diff --git a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/arbitrary/RandomBasedArbitrary.java b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/arbitrary/RandomBasedArbitrary.java deleted file mode 100644 index 257bd17fc1892..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/arbitrary/RandomBasedArbitrary.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.logsdb.datageneration.arbitrary; - -import org.elasticsearch.logsdb.datageneration.FieldType; - -import static org.elasticsearch.test.ESTestCase.randomAlphaOfLengthBetween; -import static org.elasticsearch.test.ESTestCase.randomBoolean; -import static org.elasticsearch.test.ESTestCase.randomDouble; -import static org.elasticsearch.test.ESTestCase.randomFrom; -import static org.elasticsearch.test.ESTestCase.randomIntBetween; -import static org.elasticsearch.test.ESTestCase.randomLong; - -public class RandomBasedArbitrary implements Arbitrary { - @Override - public boolean generateSubObject() { - // Using a static 10% change, this is just a chosen value that can be tweaked. - return randomDouble() <= 0.1; - } - - @Override - public boolean generateNestedObject() { - // Using a static 10% change, this is just a chosen value that can be tweaked. - return randomDouble() <= 0.1; - } - - @Override - public int childFieldCount(int lowerBound, int upperBound) { - return randomIntBetween(lowerBound, upperBound); - } - - @Override - public String fieldName(int lengthLowerBound, int lengthUpperBound) { - return randomAlphaOfLengthBetween(lengthLowerBound, lengthUpperBound); - } - - @Override - public FieldType fieldType() { - return randomFrom(FieldType.values()); - } - - @Override - public long longValue() { - return randomLong(); - } - - @Override - public String stringValue(int lengthLowerBound, int lengthUpperBound) { - return randomAlphaOfLengthBetween(lengthLowerBound, lengthUpperBound); - } - - @Override - public boolean generateNullValue() { - // Using a static 10% chance, this is just a chosen value that can be tweaked. 
- return randomDouble() < 0.1; - } - - @Override - public boolean generateArrayOfValues() { - return randomBoolean(); - } - - @Override - public int valueArraySize() { - return randomIntBetween(0, 5); - } - - @Override - public boolean generateArrayOfObjects() { - return randomBoolean(); - } - - @Override - public int objectArraySize() { - return randomIntBetween(0, 5); - } -} diff --git a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DataSource.java b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DataSource.java new file mode 100644 index 0000000000000..f53b8169f6b70 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DataSource.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.logsdb.datageneration.datasource; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +/** + * This class handles any decision performed during data generation that changes the output. + * For example: generating a random number, array of random size, mapping parameter. + *

+ * Goals of this abstraction are:
+ * <ul>
+ * <li>to be able to easily add new types of decisions/generators</li>
+ * <li>to decouple different types of decisions from each other, adding a new data type should be an isolated additive change</li>
+ * <li>to allow overriding only a small, specific subset of behavior (e.g. for testing purposes)</li>
+ * </ul>
+ */ +public class DataSource { + private List handlers; + + public DataSource(Collection additionalHandlers) { + this.handlers = new ArrayList<>(); + + this.handlers.addAll(additionalHandlers); + + this.handlers.add(new DefaultPrimitiveTypesHandler()); + this.handlers.add(new DefaultWrappersHandler()); + this.handlers.add(new DefaultObjectGenerationHandler()); + } + + public T get(DataSourceRequest request) { + for (var handler : handlers) { + var response = request.accept(handler); + if (response != null) { + return response; + } + } + + throw new IllegalStateException("Request is not supported by data source"); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DataSourceHandler.java b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DataSourceHandler.java new file mode 100644 index 0000000000000..1ee587159ee5f --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DataSourceHandler.java @@ -0,0 +1,71 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.logsdb.datageneration.datasource; + +public interface DataSourceHandler { + default DataSourceResponse.LongGenerator handle(DataSourceRequest.LongGenerator request) { + return null; + } + + default DataSourceResponse.UnsignedLongGenerator handle(DataSourceRequest.UnsignedLongGenerator request) { + return null; + } + + default DataSourceResponse.IntegerGenerator handle(DataSourceRequest.IntegerGenerator request) { + return null; + } + + default DataSourceResponse.ShortGenerator handle(DataSourceRequest.ShortGenerator request) { + return null; + } + + default DataSourceResponse.ByteGenerator handle(DataSourceRequest.ByteGenerator request) { + return null; + } + + default DataSourceResponse.DoubleGenerator handle(DataSourceRequest.DoubleGenerator request) { + return null; + } + + default DataSourceResponse.DoubleInRangeGenerator handle(DataSourceRequest.DoubleInRangeGenerator request) { + return null; + } + + default DataSourceResponse.FloatGenerator handle(DataSourceRequest.FloatGenerator request) { + return null; + } + + default DataSourceResponse.HalfFloatGenerator handle(DataSourceRequest.HalfFloatGenerator request) { + return null; + } + + default DataSourceResponse.StringGenerator handle(DataSourceRequest.StringGenerator request) { + return null; + } + + default DataSourceResponse.NullWrapper handle(DataSourceRequest.NullWrapper request) { + return null; + } + + default DataSourceResponse.ArrayWrapper handle(DataSourceRequest.ArrayWrapper request) { + return null; + } + + default DataSourceResponse.ChildFieldGenerator handle(DataSourceRequest.ChildFieldGenerator request) { + return null; + } + + default DataSourceResponse.FieldTypeGenerator handle(DataSourceRequest.FieldTypeGenerator request) { + return null; + } + + default DataSourceResponse.ObjectArrayGenerator handle(DataSourceRequest.ObjectArrayGenerator request) { + return null; + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DataSourceRequest.java b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DataSourceRequest.java new file mode 100644 
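[Editor's note: the request/response pairs in this package use double dispatch — each request's accept(handler) invokes the matching handle overload, and DataSource.get returns the first non-null response, trying custom handlers before the three defaults. A hedged usage sketch, not part of the patch; the class name DataSourceUsageSketch is hypothetical, the other names come from the files added here.]

import org.elasticsearch.logsdb.datageneration.datasource.DataSource;
import org.elasticsearch.logsdb.datageneration.datasource.DataSourceRequest;

import java.util.List;

class DataSourceUsageSketch {
    // With no custom handlers, the LongGenerator request falls through to
    // DefaultPrimitiveTypesHandler, which supplies random long values.
    static long sampleLong() {
        var dataSource = new DataSource(List.of());
        return dataSource.get(new DataSourceRequest.LongGenerator()).generator().get();
    }
}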
index 0000000000000..d28ce7033578c --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DataSourceRequest.java @@ -0,0 +1,109 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.logsdb.datageneration.datasource; + +import org.elasticsearch.logsdb.datageneration.DataGeneratorSpecification; + +public interface DataSourceRequest { + TResponse accept(DataSourceHandler handler); + + record LongGenerator() implements DataSourceRequest { + public DataSourceResponse.LongGenerator accept(DataSourceHandler handler) { + return handler.handle(this); + } + } + + record UnsignedLongGenerator() implements DataSourceRequest { + public DataSourceResponse.UnsignedLongGenerator accept(DataSourceHandler handler) { + return handler.handle(this); + } + } + + record IntegerGenerator() implements DataSourceRequest { + public DataSourceResponse.IntegerGenerator accept(DataSourceHandler handler) { + return handler.handle(this); + } + } + + record ShortGenerator() implements DataSourceRequest { + public DataSourceResponse.ShortGenerator accept(DataSourceHandler handler) { + return handler.handle(this); + } + } + + record ByteGenerator() implements DataSourceRequest { + public DataSourceResponse.ByteGenerator accept(DataSourceHandler handler) { + return handler.handle(this); + } + } + + record DoubleGenerator() implements DataSourceRequest { + public DataSourceResponse.DoubleGenerator accept(DataSourceHandler handler) { + return handler.handle(this); + } + } + + record DoubleInRangeGenerator(double minExclusive, double maxExclusive) + implements + DataSourceRequest { + public DataSourceResponse.DoubleInRangeGenerator accept(DataSourceHandler handler) { + return handler.handle(this); + } + } + + record FloatGenerator() implements DataSourceRequest { + public DataSourceResponse.FloatGenerator accept(DataSourceHandler handler) { + return handler.handle(this); + } + } + + record HalfFloatGenerator() implements DataSourceRequest { + public DataSourceResponse.HalfFloatGenerator accept(DataSourceHandler handler) { + return handler.handle(this); + } + } + + record StringGenerator() implements DataSourceRequest { + public DataSourceResponse.StringGenerator accept(DataSourceHandler handler) { + return handler.handle(this); + } + } + + record NullWrapper() implements DataSourceRequest { + public DataSourceResponse.NullWrapper accept(DataSourceHandler handler) { + return handler.handle(this); + } + } + + record ArrayWrapper() implements DataSourceRequest { + public DataSourceResponse.ArrayWrapper accept(DataSourceHandler handler) { + return handler.handle(this); + } + } + + record ChildFieldGenerator(DataGeneratorSpecification specification) + implements + DataSourceRequest { + public DataSourceResponse.ChildFieldGenerator accept(DataSourceHandler handler) { + return handler.handle(this); + } + } + + record FieldTypeGenerator() implements DataSourceRequest { + public DataSourceResponse.FieldTypeGenerator accept(DataSourceHandler handler) { + return handler.handle(this); + } + } + + record ObjectArrayGenerator() implements DataSourceRequest { + public DataSourceResponse.ObjectArrayGenerator accept(DataSourceHandler handler) { + return 
handler.handle(this); + } + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DataSourceResponse.java b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DataSourceResponse.java new file mode 100644 index 0000000000000..867bb9603ca00 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DataSourceResponse.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.logsdb.datageneration.datasource; + +import org.elasticsearch.logsdb.datageneration.FieldType; + +import java.util.Optional; +import java.util.function.Function; +import java.util.function.Supplier; + +public interface DataSourceResponse { + record LongGenerator(Supplier generator) implements DataSourceResponse {} + + record UnsignedLongGenerator(Supplier generator) implements DataSourceResponse {} + + record IntegerGenerator(Supplier generator) implements DataSourceResponse {} + + record ShortGenerator(Supplier generator) implements DataSourceResponse {} + + record ByteGenerator(Supplier generator) implements DataSourceResponse {} + + record DoubleGenerator(Supplier generator) implements DataSourceResponse {} + + record DoubleInRangeGenerator(Supplier generator) implements DataSourceResponse {} + + record FloatGenerator(Supplier generator) implements DataSourceResponse {} + + record HalfFloatGenerator(Supplier generator) implements DataSourceResponse {} + + record StringGenerator(Supplier generator) implements DataSourceResponse {} + + record NullWrapper(Function, Supplier> wrapper) implements DataSourceResponse {} + + record ArrayWrapper(Function, Supplier> wrapper) implements DataSourceResponse {} + + interface ChildFieldGenerator extends DataSourceResponse { + int generateChildFieldCount(); + + boolean generateNestedSubObject(); + + boolean generateRegularSubObject(); + + String generateFieldName(); + } + + record FieldTypeGenerator(Supplier generator) implements DataSourceResponse {} + + record ObjectArrayGenerator(Supplier> lengthGenerator) implements DataSourceResponse {} +} diff --git a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DefaultObjectGenerationHandler.java b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DefaultObjectGenerationHandler.java new file mode 100644 index 0000000000000..45e4b0b6d6624 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DefaultObjectGenerationHandler.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */
+
+package org.elasticsearch.logsdb.datageneration.datasource;
+
+import org.elasticsearch.logsdb.datageneration.FieldType;
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.Optional;
+
+import static org.elasticsearch.test.ESTestCase.randomAlphaOfLengthBetween;
+import static org.elasticsearch.test.ESTestCase.randomDouble;
+import static org.elasticsearch.test.ESTestCase.randomFrom;
+import static org.elasticsearch.test.ESTestCase.randomIntBetween;
+
+public class DefaultObjectGenerationHandler implements DataSourceHandler {
+    @Override
+    public DataSourceResponse.ChildFieldGenerator handle(DataSourceRequest.ChildFieldGenerator request) {
+        return new DataSourceResponse.ChildFieldGenerator() {
+            @Override
+            public int generateChildFieldCount() {
+                return ESTestCase.randomIntBetween(0, request.specification().maxFieldCountPerLevel());
+            }
+
+            @Override
+            public boolean generateNestedSubObject() {
+                // Using a static 10% chance, this is just a chosen value that can be tweaked.
+                return randomDouble() <= 0.1;
+            }
+
+            @Override
+            public boolean generateRegularSubObject() {
+                // Using a static 10% chance, this is just a chosen value that can be tweaked.
+                return randomDouble() <= 0.1;
+            }
+
+            @Override
+            public String generateFieldName() {
+                return randomAlphaOfLengthBetween(1, 10);
+            }
+        };
+    }
+
+    @Override
+    public DataSourceResponse.FieldTypeGenerator handle(DataSourceRequest.FieldTypeGenerator request) {
+        return new DataSourceResponse.FieldTypeGenerator(() -> randomFrom(FieldType.values()));
+    }
+
+    @Override
+    public DataSourceResponse.ObjectArrayGenerator handle(DataSourceRequest.ObjectArrayGenerator request) {
+        return new DataSourceResponse.ObjectArrayGenerator(() -> {
+            if (ESTestCase.randomBoolean()) {
+                return Optional.of(randomIntBetween(0, 5));
+            }
+
+            return Optional.empty();
+        });
+    }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DefaultPrimitiveTypesHandler.java b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DefaultPrimitiveTypesHandler.java
new file mode 100644
index 0000000000000..c9e581f973aae
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DefaultPrimitiveTypesHandler.java
@@ -0,0 +1,74 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */ + +package org.elasticsearch.logsdb.datageneration.datasource; + +import org.apache.lucene.sandbox.document.HalfFloatPoint; +import org.elasticsearch.test.ESTestCase; + +import java.math.BigInteger; + +public class DefaultPrimitiveTypesHandler implements DataSourceHandler { + @Override + public DataSourceResponse.LongGenerator handle(DataSourceRequest.LongGenerator request) { + return new DataSourceResponse.LongGenerator(ESTestCase::randomLong); + } + + @Override + public DataSourceResponse.UnsignedLongGenerator handle(DataSourceRequest.UnsignedLongGenerator request) { + return new DataSourceResponse.UnsignedLongGenerator(() -> new BigInteger(64, ESTestCase.random())); + } + + @Override + public DataSourceResponse.IntegerGenerator handle(DataSourceRequest.IntegerGenerator request) { + return new DataSourceResponse.IntegerGenerator(ESTestCase::randomInt); + } + + @Override + public DataSourceResponse.ShortGenerator handle(DataSourceRequest.ShortGenerator request) { + return new DataSourceResponse.ShortGenerator(ESTestCase::randomShort); + } + + @Override + public DataSourceResponse.ByteGenerator handle(DataSourceRequest.ByteGenerator request) { + return new DataSourceResponse.ByteGenerator(ESTestCase::randomByte); + } + + @Override + public DataSourceResponse.DoubleGenerator handle(DataSourceRequest.DoubleGenerator request) { + return new DataSourceResponse.DoubleGenerator(ESTestCase::randomDouble); + } + + @Override + public DataSourceResponse.DoubleInRangeGenerator handle(DataSourceRequest.DoubleInRangeGenerator request) { + return new DataSourceResponse.DoubleInRangeGenerator( + () -> ESTestCase.randomDoubleBetween(request.minExclusive(), request.maxExclusive(), false) + ); + } + + @Override + public DataSourceResponse.FloatGenerator handle(DataSourceRequest.FloatGenerator request) { + return new DataSourceResponse.FloatGenerator(ESTestCase::randomFloat); + } + + @Override + public DataSourceResponse.HalfFloatGenerator handle(DataSourceRequest.HalfFloatGenerator request) { + // This trick taken from NumberFieldMapper reduces precision of float to actual half float precision. + // We do this to avoid getting tripped on values in synthetic source having reduced precision but + // values in stored source having full float precision. + // This can be removed with a more lenient matcher. + return new DataSourceResponse.HalfFloatGenerator( + () -> HalfFloatPoint.sortableShortToHalfFloat(HalfFloatPoint.halfFloatToSortableShort(ESTestCase.randomFloat())) + ); + } + + @Override + public DataSourceResponse.StringGenerator handle(DataSourceRequest.StringGenerator request) { + return new DataSourceResponse.StringGenerator(() -> ESTestCase.randomAlphaOfLengthBetween(0, 50)); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DefaultWrappersHandler.java b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DefaultWrappersHandler.java new file mode 100644 index 0000000000000..57af9786f200b --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DefaultWrappersHandler.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.logsdb.datageneration.datasource; + +import org.elasticsearch.test.ESTestCase; + +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.IntStream; + +public class DefaultWrappersHandler implements DataSourceHandler { + @Override + public DataSourceResponse.NullWrapper handle(DataSourceRequest.NullWrapper ignored) { + return new DataSourceResponse.NullWrapper(injectNulls()); + } + + @Override + public DataSourceResponse.ArrayWrapper handle(DataSourceRequest.ArrayWrapper ignored) { + return new DataSourceResponse.ArrayWrapper(wrapInArray()); + } + + private static Function, Supplier> injectNulls() { + return (values) -> () -> ESTestCase.randomBoolean() ? null : values.get(); + } + + private static Function, Supplier> wrapInArray() { + return (values) -> () -> { + if (ESTestCase.randomBoolean()) { + var size = ESTestCase.randomIntBetween(0, 5); + return IntStream.range(0, size).mapToObj((i) -> values.get()).toList(); + } + + return values.get(); + }; + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/Context.java b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/Context.java index b257807890c00..647d5bff152d1 100644 --- a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/Context.java +++ b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/Context.java @@ -9,9 +9,17 @@ package org.elasticsearch.logsdb.datageneration.fields; import org.elasticsearch.logsdb.datageneration.DataGeneratorSpecification; +import org.elasticsearch.logsdb.datageneration.datasource.DataSourceRequest; +import org.elasticsearch.logsdb.datageneration.datasource.DataSourceResponse; + +import java.util.Optional; class Context { private final DataGeneratorSpecification specification; + + private final DataSourceResponse.ChildFieldGenerator childFieldGenerator; + private final DataSourceResponse.FieldTypeGenerator fieldTypeGenerator; + private final DataSourceResponse.ObjectArrayGenerator objectArrayGenerator; private final int objectDepth; private final int nestedFieldsCount; @@ -21,6 +29,9 @@ class Context { private Context(DataGeneratorSpecification specification, int objectDepth, int nestedFieldsCount) { this.specification = specification; + this.childFieldGenerator = specification.dataSource().get(new DataSourceRequest.ChildFieldGenerator(specification)); + this.fieldTypeGenerator = specification.dataSource().get(new DataSourceRequest.FieldTypeGenerator()); + this.objectArrayGenerator = specification.dataSource().get(new DataSourceRequest.ObjectArrayGenerator()); this.objectDepth = objectDepth; this.nestedFieldsCount = nestedFieldsCount; } @@ -29,6 +40,14 @@ public DataGeneratorSpecification specification() { return specification; } + public DataSourceResponse.ChildFieldGenerator childFieldGenerator() { + return childFieldGenerator; + } + + public DataSourceResponse.FieldTypeGenerator fieldTypeGenerator() { + return fieldTypeGenerator; + } + public Context subObject() { return new Context(specification, objectDepth + 1, nestedFieldsCount); } @@ -38,16 +57,20 @@ public Context nestedObject() { } public boolean shouldAddObjectField() { - return specification.arbitrary().generateSubObject() && objectDepth < specification.maxObjectDepth(); + return childFieldGenerator.generateRegularSubObject() && objectDepth < specification.maxObjectDepth(); } public boolean shouldAddNestedField() { - return 
specification.arbitrary().generateNestedObject() + return childFieldGenerator.generateNestedSubObject() && objectDepth < specification.maxObjectDepth() && nestedFieldsCount < specification.nestedFieldsLimit(); } - public boolean shouldGenerateObjectArray() { - return objectDepth > 0 && specification.arbitrary().generateArrayOfObjects(); + public Optional generateObjectArray() { + if (objectDepth == 0) { + return Optional.empty(); + } + + return objectArrayGenerator.lengthGenerator().get(); } } diff --git a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/FieldValues.java b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/FieldValues.java deleted file mode 100644 index 74196c5c8926c..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/FieldValues.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.logsdb.datageneration.fields; - -import org.elasticsearch.logsdb.datageneration.arbitrary.Arbitrary; - -import java.util.function.Function; -import java.util.function.Supplier; -import java.util.stream.IntStream; - -public class FieldValues { - private FieldValues() {} - - public static Function, Supplier> injectNulls(Arbitrary arbitrary) { - return (values) -> () -> arbitrary.generateNullValue() ? null : values.get(); - } - - public static Function, Supplier> wrappedInArray(Arbitrary arbitrary) { - return (values) -> () -> { - if (arbitrary.generateArrayOfValues()) { - var size = arbitrary.valueArraySize(); - return IntStream.range(0, size).mapToObj((i) -> values.get()).toList(); - } - - return values.get(); - }; - } -} diff --git a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/GenericSubObjectFieldDataGenerator.java b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/GenericSubObjectFieldDataGenerator.java index 24f59867f85b8..1a3da3b63add0 100644 --- a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/GenericSubObjectFieldDataGenerator.java +++ b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/GenericSubObjectFieldDataGenerator.java @@ -11,8 +11,16 @@ import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.logsdb.datageneration.FieldDataGenerator; import org.elasticsearch.logsdb.datageneration.FieldType; +import org.elasticsearch.logsdb.datageneration.fields.leaf.ByteFieldDataGenerator; +import org.elasticsearch.logsdb.datageneration.fields.leaf.DoubleFieldDataGenerator; +import org.elasticsearch.logsdb.datageneration.fields.leaf.FloatFieldDataGenerator; +import org.elasticsearch.logsdb.datageneration.fields.leaf.HalfFloatFieldDataGenerator; +import org.elasticsearch.logsdb.datageneration.fields.leaf.IntegerFieldDataGenerator; import org.elasticsearch.logsdb.datageneration.fields.leaf.KeywordFieldDataGenerator; import org.elasticsearch.logsdb.datageneration.fields.leaf.LongFieldDataGenerator; +import org.elasticsearch.logsdb.datageneration.fields.leaf.ScaledFloatFieldDataGenerator; +import org.elasticsearch.logsdb.datageneration.fields.leaf.ShortFieldDataGenerator; +import 
org.elasticsearch.logsdb.datageneration.fields.leaf.UnsignedLongFieldDataGenerator; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; @@ -34,7 +42,7 @@ public class GenericSubObjectFieldDataGenerator { List generateChildFields() { var existingFieldNames = new HashSet(); // no child fields is legal - var childFieldsCount = context.specification().arbitrary().childFieldCount(0, context.specification().maxFieldCountPerLevel()); + var childFieldsCount = context.childFieldGenerator().generateChildFieldCount(); var result = new ArrayList(childFieldsCount); for (int i = 0; i < childFieldsCount; i++) { @@ -45,7 +53,7 @@ List generateChildFields() { } else if (context.shouldAddNestedField()) { result.add(new ChildField(fieldName, new NestedFieldDataGenerator(context.nestedObject()))); } else { - var fieldType = context.specification().arbitrary().fieldType(); + var fieldType = context.fieldTypeGenerator().generator().get(); result.add(leafField(fieldType, fieldName)); } } @@ -66,8 +74,9 @@ static void writeChildFieldsMapping(XContentBuilder mapping, List ch static void writeObjectsData(XContentBuilder document, Context context, CheckedConsumer objectWriter) throws IOException { - if (context.shouldGenerateObjectArray()) { - int size = context.specification().arbitrary().objectArraySize(); + var optionalLength = context.generateObjectArray(); + if (optionalLength.isPresent()) { + int size = optionalLength.get(); document.startArray(); for (int i = 0; i < size; i++) { @@ -94,17 +103,25 @@ static void writeChildFieldsData(XContentBuilder document, Iterable private ChildField leafField(FieldType type, String fieldName) { var generator = switch (type) { - case LONG -> new LongFieldDataGenerator(context.specification().arbitrary()); - case KEYWORD -> new KeywordFieldDataGenerator(context.specification().arbitrary()); + case KEYWORD -> new KeywordFieldDataGenerator(context.specification().dataSource()); + case LONG -> new LongFieldDataGenerator(context.specification().dataSource()); + case UNSIGNED_LONG -> new UnsignedLongFieldDataGenerator(context.specification().dataSource()); + case INTEGER -> new IntegerFieldDataGenerator(context.specification().dataSource()); + case SHORT -> new ShortFieldDataGenerator(context.specification().dataSource()); + case BYTE -> new ByteFieldDataGenerator(context.specification().dataSource()); + case DOUBLE -> new DoubleFieldDataGenerator(context.specification().dataSource()); + case FLOAT -> new FloatFieldDataGenerator(context.specification().dataSource()); + case HALF_FLOAT -> new HalfFloatFieldDataGenerator(context.specification().dataSource()); + case SCALED_FLOAT -> new ScaledFloatFieldDataGenerator(context.specification().dataSource()); }; return new ChildField(fieldName, generator); } private String generateFieldName(Set existingFields) { - var fieldName = context.specification().arbitrary().fieldName(1, 10); + var fieldName = context.childFieldGenerator().generateFieldName(); while (existingFields.contains(fieldName)) { - fieldName = context.specification().arbitrary().fieldName(1, 10); + fieldName = context.childFieldGenerator().generateFieldName(); } existingFields.add(fieldName); diff --git a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/ByteFieldDataGenerator.java b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/ByteFieldDataGenerator.java new file mode 100644 index 0000000000000..07a7bd65b67fb --- /dev/null +++ 
b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/ByteFieldDataGenerator.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.logsdb.datageneration.fields.leaf; + +import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.logsdb.datageneration.FieldDataGenerator; +import org.elasticsearch.logsdb.datageneration.datasource.DataSource; +import org.elasticsearch.logsdb.datageneration.datasource.DataSourceRequest; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.function.Supplier; + +public class ByteFieldDataGenerator implements FieldDataGenerator { + private final Supplier valueGenerator; + + public ByteFieldDataGenerator(DataSource dataSource) { + var bytes = dataSource.get(new DataSourceRequest.ByteGenerator()); + var nulls = dataSource.get(new DataSourceRequest.NullWrapper()); + var arrays = dataSource.get(new DataSourceRequest.ArrayWrapper()); + + this.valueGenerator = arrays.wrapper().compose(nulls.wrapper()).apply(() -> bytes.generator().get()); + } + + @Override + public CheckedConsumer mappingWriter() { + return b -> b.startObject().field("type", "byte").endObject(); + } + + @Override + public CheckedConsumer fieldValueGenerator() { + return b -> b.value(valueGenerator.get()); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/DoubleFieldDataGenerator.java b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/DoubleFieldDataGenerator.java new file mode 100644 index 0000000000000..84c5afe2fae51 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/DoubleFieldDataGenerator.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.logsdb.datageneration.fields.leaf; + +import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.logsdb.datageneration.FieldDataGenerator; +import org.elasticsearch.logsdb.datageneration.datasource.DataSource; +import org.elasticsearch.logsdb.datageneration.datasource.DataSourceRequest; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.function.Supplier; + +public class DoubleFieldDataGenerator implements FieldDataGenerator { + private final Supplier valueGenerator; + + public DoubleFieldDataGenerator(DataSource dataSource) { + var doubles = dataSource.get(new DataSourceRequest.DoubleGenerator()); + var nulls = dataSource.get(new DataSourceRequest.NullWrapper()); + var arrays = dataSource.get(new DataSourceRequest.ArrayWrapper()); + + this.valueGenerator = arrays.wrapper().compose(nulls.wrapper()).apply(() -> doubles.generator().get()); + } + + @Override + public CheckedConsumer mappingWriter() { + return b -> b.startObject().field("type", "double").endObject(); + } + + @Override + public CheckedConsumer fieldValueGenerator() { + return b -> b.value(valueGenerator.get()); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/FloatFieldDataGenerator.java b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/FloatFieldDataGenerator.java new file mode 100644 index 0000000000000..34e401a99bd0a --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/FloatFieldDataGenerator.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.logsdb.datageneration.fields.leaf; + +import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.logsdb.datageneration.FieldDataGenerator; +import org.elasticsearch.logsdb.datageneration.datasource.DataSource; +import org.elasticsearch.logsdb.datageneration.datasource.DataSourceRequest; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.function.Supplier; + +public class FloatFieldDataGenerator implements FieldDataGenerator { + private final Supplier valueGenerator; + + public FloatFieldDataGenerator(DataSource dataSource) { + var floats = dataSource.get(new DataSourceRequest.FloatGenerator()); + var nulls = dataSource.get(new DataSourceRequest.NullWrapper()); + var arrays = dataSource.get(new DataSourceRequest.ArrayWrapper()); + + this.valueGenerator = arrays.wrapper().compose(nulls.wrapper()).apply(() -> floats.generator().get()); + } + + @Override + public CheckedConsumer mappingWriter() { + return b -> b.startObject().field("type", "float").endObject(); + } + + @Override + public CheckedConsumer fieldValueGenerator() { + return b -> b.value(valueGenerator.get()); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/HalfFloatFieldDataGenerator.java b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/HalfFloatFieldDataGenerator.java new file mode 100644 index 0000000000000..3201926e35041 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/HalfFloatFieldDataGenerator.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.logsdb.datageneration.fields.leaf; + +import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.logsdb.datageneration.FieldDataGenerator; +import org.elasticsearch.logsdb.datageneration.datasource.DataSource; +import org.elasticsearch.logsdb.datageneration.datasource.DataSourceRequest; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.function.Supplier; + +public class HalfFloatFieldDataGenerator implements FieldDataGenerator { + private final Supplier valueGenerator; + + public HalfFloatFieldDataGenerator(DataSource dataSource) { + var halfFloats = dataSource.get(new DataSourceRequest.HalfFloatGenerator()); + var nulls = dataSource.get(new DataSourceRequest.NullWrapper()); + var arrays = dataSource.get(new DataSourceRequest.ArrayWrapper()); + + this.valueGenerator = arrays.wrapper().compose(nulls.wrapper()).apply(() -> halfFloats.generator().get()); + } + + @Override + public CheckedConsumer mappingWriter() { + return b -> b.startObject().field("type", "half_float").endObject(); + } + + @Override + public CheckedConsumer fieldValueGenerator() { + return b -> b.value(valueGenerator.get()); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/IntegerFieldDataGenerator.java b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/IntegerFieldDataGenerator.java new file mode 100644 index 0000000000000..a532d77abc80e --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/IntegerFieldDataGenerator.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.logsdb.datageneration.fields.leaf; + +import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.logsdb.datageneration.FieldDataGenerator; +import org.elasticsearch.logsdb.datageneration.datasource.DataSource; +import org.elasticsearch.logsdb.datageneration.datasource.DataSourceRequest; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.function.Supplier; + +public class IntegerFieldDataGenerator implements FieldDataGenerator { + private final Supplier valueGenerator; + + public IntegerFieldDataGenerator(DataSource dataSource) { + var ints = dataSource.get(new DataSourceRequest.IntegerGenerator()); + var nulls = dataSource.get(new DataSourceRequest.NullWrapper()); + var arrays = dataSource.get(new DataSourceRequest.ArrayWrapper()); + + this.valueGenerator = arrays.wrapper().compose(nulls.wrapper()).apply(() -> ints.generator().get()); + } + + @Override + public CheckedConsumer mappingWriter() { + return b -> b.startObject().field("type", "integer").endObject(); + } + + @Override + public CheckedConsumer fieldValueGenerator() { + return b -> b.value(valueGenerator.get()); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/KeywordFieldDataGenerator.java b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/KeywordFieldDataGenerator.java index 89ae1d6034c15..913cd5657dc6f 100644 --- a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/KeywordFieldDataGenerator.java +++ b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/KeywordFieldDataGenerator.java @@ -10,20 +10,22 @@ import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.logsdb.datageneration.FieldDataGenerator; -import org.elasticsearch.logsdb.datageneration.arbitrary.Arbitrary; +import org.elasticsearch.logsdb.datageneration.datasource.DataSource; +import org.elasticsearch.logsdb.datageneration.datasource.DataSourceRequest; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; import java.util.function.Supplier; -import static org.elasticsearch.logsdb.datageneration.fields.FieldValues.injectNulls; -import static org.elasticsearch.logsdb.datageneration.fields.FieldValues.wrappedInArray; - public class KeywordFieldDataGenerator implements FieldDataGenerator { private final Supplier valueGenerator; - public KeywordFieldDataGenerator(Arbitrary arbitrary) { - this.valueGenerator = injectNulls(arbitrary).andThen(wrappedInArray(arbitrary)).apply(() -> arbitrary.stringValue(0, 50)); + public KeywordFieldDataGenerator(DataSource dataSource) { + var strings = dataSource.get(new DataSourceRequest.StringGenerator()); + var nulls = dataSource.get(new DataSourceRequest.NullWrapper()); + var arrays = dataSource.get(new DataSourceRequest.ArrayWrapper()); + + this.valueGenerator = arrays.wrapper().compose(nulls.wrapper()).apply(() -> strings.generator().get()); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/LongFieldDataGenerator.java b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/LongFieldDataGenerator.java index 097c5fe024d2b..3627385f51a7c 100644 --- a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/LongFieldDataGenerator.java +++ b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/LongFieldDataGenerator.java @@ -10,20 
+10,22 @@ import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.logsdb.datageneration.FieldDataGenerator; -import org.elasticsearch.logsdb.datageneration.arbitrary.Arbitrary; +import org.elasticsearch.logsdb.datageneration.datasource.DataSource; +import org.elasticsearch.logsdb.datageneration.datasource.DataSourceRequest; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; import java.util.function.Supplier; -import static org.elasticsearch.logsdb.datageneration.fields.FieldValues.injectNulls; -import static org.elasticsearch.logsdb.datageneration.fields.FieldValues.wrappedInArray; - public class LongFieldDataGenerator implements FieldDataGenerator { private final Supplier valueGenerator; - public LongFieldDataGenerator(Arbitrary arbitrary) { - this.valueGenerator = injectNulls(arbitrary).andThen(wrappedInArray(arbitrary)).apply(arbitrary::longValue); + public LongFieldDataGenerator(DataSource dataSource) { + var longs = dataSource.get(new DataSourceRequest.LongGenerator()); + var nulls = dataSource.get(new DataSourceRequest.NullWrapper()); + var arrays = dataSource.get(new DataSourceRequest.ArrayWrapper()); + + this.valueGenerator = arrays.wrapper().compose(nulls.wrapper()).apply(() -> longs.generator().get()); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/ScaledFloatFieldDataGenerator.java b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/ScaledFloatFieldDataGenerator.java new file mode 100644 index 0000000000000..38fa0504cf7e7 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/ScaledFloatFieldDataGenerator.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.logsdb.datageneration.fields.leaf; + +import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.logsdb.datageneration.FieldDataGenerator; +import org.elasticsearch.logsdb.datageneration.datasource.DataSource; +import org.elasticsearch.logsdb.datageneration.datasource.DataSourceRequest; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.function.Supplier; + +public class ScaledFloatFieldDataGenerator implements FieldDataGenerator { + private final double scalingFactor; + private final Supplier valueGenerator; + + public ScaledFloatFieldDataGenerator(DataSource dataSource) { + var positiveDoubles = dataSource.get(new DataSourceRequest.DoubleInRangeGenerator(0, Double.MAX_VALUE)); + this.scalingFactor = positiveDoubles.generator().get(); + + var doubles = dataSource.get(new DataSourceRequest.DoubleGenerator()); + var nulls = dataSource.get(new DataSourceRequest.NullWrapper()); + var arrays = dataSource.get(new DataSourceRequest.ArrayWrapper()); + + this.valueGenerator = arrays.wrapper().compose(nulls.wrapper()).apply(() -> doubles.generator().get()); + } + + @Override + public CheckedConsumer mappingWriter() { + return b -> b.startObject().field("type", "scaled_float").field("scaling_factor", scalingFactor).endObject(); + } + + @Override + public CheckedConsumer fieldValueGenerator() { + return b -> b.value(valueGenerator.get()); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/ShortFieldDataGenerator.java b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/ShortFieldDataGenerator.java new file mode 100644 index 0000000000000..511b31794a925 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/ShortFieldDataGenerator.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.logsdb.datageneration.fields.leaf; + +import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.logsdb.datageneration.FieldDataGenerator; +import org.elasticsearch.logsdb.datageneration.datasource.DataSource; +import org.elasticsearch.logsdb.datageneration.datasource.DataSourceRequest; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.function.Supplier; + +public class ShortFieldDataGenerator implements FieldDataGenerator { + private final Supplier valueGenerator; + + public ShortFieldDataGenerator(DataSource dataSource) { + var shorts = dataSource.get(new DataSourceRequest.ShortGenerator()); + var nulls = dataSource.get(new DataSourceRequest.NullWrapper()); + var arrays = dataSource.get(new DataSourceRequest.ArrayWrapper()); + + this.valueGenerator = arrays.wrapper().compose(nulls.wrapper()).apply(() -> shorts.generator().get()); + } + + @Override + public CheckedConsumer mappingWriter() { + return b -> b.startObject().field("type", "short").endObject(); + } + + @Override + public CheckedConsumer fieldValueGenerator() { + return b -> b.value(valueGenerator.get()); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/UnsignedLongFieldDataGenerator.java b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/UnsignedLongFieldDataGenerator.java new file mode 100644 index 0000000000000..327b3260fdec5 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/leaf/UnsignedLongFieldDataGenerator.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */
+
+package org.elasticsearch.logsdb.datageneration.fields.leaf;
+
+import org.elasticsearch.core.CheckedConsumer;
+import org.elasticsearch.logsdb.datageneration.FieldDataGenerator;
+import org.elasticsearch.logsdb.datageneration.datasource.DataSource;
+import org.elasticsearch.logsdb.datageneration.datasource.DataSourceRequest;
+import org.elasticsearch.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.function.Supplier;
+
+public class UnsignedLongFieldDataGenerator implements FieldDataGenerator {
+    private final Supplier<Object> valueGenerator;
+
+    public UnsignedLongFieldDataGenerator(DataSource dataSource) {
+        var unsignedLongs = dataSource.get(new DataSourceRequest.UnsignedLongGenerator());
+        var nulls = dataSource.get(new DataSourceRequest.NullWrapper());
+        var arrays = dataSource.get(new DataSourceRequest.ArrayWrapper());
+
+        this.valueGenerator = arrays.wrapper().compose(nulls.wrapper()).apply(() -> unsignedLongs.generator().get());
+    }
+
+    @Override
+    public CheckedConsumer<XContentBuilder, IOException> mappingWriter() {
+        return b -> b.startObject().field("type", "unsigned_long").endObject();
+    }
+
+    @Override
+    public CheckedConsumer<XContentBuilder, IOException> fieldValueGenerator() {
+        return b -> b.value(valueGenerator.get());
+    }
+}
diff --git a/test/framework/src/test/java/org/elasticsearch/logsdb/datageneration/DataGeneratorSnapshotTests.java b/test/framework/src/test/java/org/elasticsearch/logsdb/datageneration/DataGeneratorSnapshotTests.java
index e476e02d03778..6c1b0c22f305d 100644
--- a/test/framework/src/test/java/org/elasticsearch/logsdb/datageneration/DataGeneratorSnapshotTests.java
+++ b/test/framework/src/test/java/org/elasticsearch/logsdb/datageneration/DataGeneratorSnapshotTests.java
@@ -9,16 +9,21 @@
 package org.elasticsearch.logsdb.datageneration;
 
 import org.elasticsearch.common.Strings;
-import org.elasticsearch.logsdb.datageneration.arbitrary.Arbitrary;
+import org.elasticsearch.logsdb.datageneration.datasource.DataSourceHandler;
+import org.elasticsearch.logsdb.datageneration.datasource.DataSourceRequest;
+import org.elasticsearch.logsdb.datageneration.datasource.DataSourceResponse;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.xcontent.XContentBuilder;
 import org.elasticsearch.xcontent.XContentType;
 
+import java.util.List;
+import java.util.Optional;
+
 public class DataGeneratorSnapshotTests extends ESTestCase {
     public void testSnapshot() throws Exception {
         var dataGenerator = new DataGenerator(
             DataGeneratorSpecification.builder()
-                .withArbitrary(new TestArbitrary())
+                .withDataSourceHandlers(List.of(new DataSourceOverrides()))
                 .withMaxFieldCountPerLevel(5)
                 .withMaxObjectDepth(2)
                 .build()
@@ -127,85 +132,96 @@ public void testSnapshot() throws Exception {
         assertEquals(expectedDocument, Strings.toString(document));
     }
 
-    private class TestArbitrary implements Arbitrary {
-        private int generatedFields = 0;
-        private FieldType fieldType = FieldType.KEYWORD;
+    private static class DataSourceOverrides implements DataSourceHandler {
         private long longValue = 0;
-        private long generatedStringValues = 0;
+        private long generatedStrings = 0;
        private int generateNullChecks = 0;
         private int generateArrayChecks = 0;
         private boolean producedObjectArray = false;
+        private FieldType fieldType = FieldType.KEYWORD;
+        private final StaticChildFieldGenerator childFieldGenerator = new StaticChildFieldGenerator();
 
         @Override
-        public boolean generateSubObject() {
-            return generatedFields < 6;
+        public DataSourceResponse.LongGenerator handle(DataSourceRequest.LongGenerator request) {
+            return new DataSourceResponse.LongGenerator(() -> longValue++);
         }
 
         @Override
-        public boolean generateNestedObject() {
-            return generatedFields > 6 && generatedFields < 12;
+        public DataSourceResponse.StringGenerator handle(DataSourceRequest.StringGenerator request) {
+            return new DataSourceResponse.StringGenerator(() -> "string" + (generatedStrings++ + 1));
         }
 
         @Override
-        public int childFieldCount(int lowerBound, int upperBound) {
-            assert lowerBound < 2 && upperBound > 2;
-            return 2;
+        public DataSourceResponse.NullWrapper handle(DataSourceRequest.NullWrapper request) {
+            return new DataSourceResponse.NullWrapper((values) -> () -> generateNullChecks++ % 4 == 0 ? null : values.get());
         }
 
         @Override
-        public String fieldName(int lengthLowerBound, int lengthUpperBound) {
-            return "f" + (generatedFields++ + 1);
-        }
+        public DataSourceResponse.ArrayWrapper handle(DataSourceRequest.ArrayWrapper request) {
 
-        @Override
-        public FieldType fieldType() {
-            if (fieldType == FieldType.KEYWORD) {
-                fieldType = FieldType.LONG;
-                return FieldType.KEYWORD;
-            }
-
-            fieldType = FieldType.KEYWORD;
-            return FieldType.LONG;
-        }
+            return new DataSourceResponse.ArrayWrapper((values) -> () -> {
+                if (generateArrayChecks++ % 4 == 0) {
+                    // we have nulls so can't use List.of
+                    return new Object[] { values.get(), values.get() };
+                }
 
-        @Override
-        public long longValue() {
-            return longValue++;
+                return values.get();
+            });
         }
 
         @Override
-        public String stringValue(int lengthLowerBound, int lengthUpperBound) {
-            return "string" + (generatedStringValues++ + 1);
+        public DataSourceResponse.ChildFieldGenerator handle(DataSourceRequest.ChildFieldGenerator request) {
+
+            return childFieldGenerator;
         }
 
         @Override
-        public boolean generateNullValue() {
-            return generateNullChecks++ % 4 == 0;
+        public DataSourceResponse.ObjectArrayGenerator handle(DataSourceRequest.ObjectArrayGenerator request) {
+            return new DataSourceResponse.ObjectArrayGenerator(() -> {
+                if (producedObjectArray == false) {
+                    producedObjectArray = true;
+                    return Optional.of(2);
+                }
+
+                return Optional.empty();
+            });
         }
 
         @Override
-        public boolean generateArrayOfValues() {
-            return generateArrayChecks++ % 4 == 0;
+        public DataSourceResponse.FieldTypeGenerator handle(DataSourceRequest.FieldTypeGenerator request) {
+            return new DataSourceResponse.FieldTypeGenerator(() -> {
+                if (fieldType == FieldType.KEYWORD) {
+                    fieldType = FieldType.LONG;
+                    return FieldType.KEYWORD;
+                }
+
+                fieldType = FieldType.KEYWORD;
+                return FieldType.LONG;
+            });
         }
+    }
+
+    private static class StaticChildFieldGenerator implements DataSourceResponse.ChildFieldGenerator {
+        private int generatedFields = 0;
 
         @Override
-        public int valueArraySize() {
+        public int generateChildFieldCount() {
             return 2;
         }
 
         @Override
-        public boolean generateArrayOfObjects() {
-            if (producedObjectArray == false) {
-                producedObjectArray = true;
-                return true;
-            }
+        public boolean generateNestedSubObject() {
+            return generatedFields > 6 && generatedFields < 12;
+        }
 
-            return false;
+        @Override
+        public boolean generateRegularSubObject() {
+            return generatedFields < 6;
         }
 
         @Override
-        public int objectArraySize() {
-            return 2;
+        public String generateFieldName() {
+            return "f" + (generatedFields++ + 1);
         }
     }
 }
diff --git a/test/framework/src/test/java/org/elasticsearch/logsdb/datageneration/DataGeneratorTests.java b/test/framework/src/test/java/org/elasticsearch/logsdb/datageneration/DataGeneratorTests.java
index 309c5ad428829..db3b81891e87e 100644
--- a/test/framework/src/test/java/org/elasticsearch/logsdb/datageneration/DataGeneratorTests.java
+++ b/test/framework/src/test/java/org/elasticsearch/logsdb/datageneration/DataGeneratorTests.java
@@ -11,13 +11,20 @@
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.index.mapper.MapperServiceTestCase;
 import org.elasticsearch.index.mapper.SourceToParse;
-import org.elasticsearch.logsdb.datageneration.arbitrary.Arbitrary;
-import org.elasticsearch.logsdb.datageneration.arbitrary.RandomBasedArbitrary;
+import org.elasticsearch.index.mapper.extras.MapperExtrasPlugin;
+import org.elasticsearch.logsdb.datageneration.datasource.DataSourceHandler;
+import org.elasticsearch.logsdb.datageneration.datasource.DataSourceRequest;
+import org.elasticsearch.logsdb.datageneration.datasource.DataSourceResponse;
+import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.xcontent.XContentBuilder;
 import org.elasticsearch.xcontent.XContentType;
+import org.elasticsearch.xpack.unsignedlong.UnsignedLongMapperPlugin;
 
 import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+import java.util.Optional;
 
 public class DataGeneratorTests extends ESTestCase {
     public void testDataGeneratorSanity() throws IOException {
@@ -34,15 +41,21 @@ public void testDataGeneratorSanity() throws IOException {
 
     public void testDataGeneratorProducesValidMappingAndDocument() throws IOException {
         // Make sure objects, nested objects and all field types are covered.
-        var testArbitrary = new RandomBasedArbitrary() {
+        var testChildFieldGenerator = new DataSourceResponse.ChildFieldGenerator() {
             private boolean subObjectCovered = false;
             private boolean nestedCovered = false;
             private int generatedFields = 0;
 
             @Override
-            public boolean generateSubObject() {
-                if (subObjectCovered == false) {
-                    subObjectCovered = true;
+            public int generateChildFieldCount() {
+                // Make sure to generate enough fields to go through all field types.
+                return 20;
+            }
+
+            @Override
+            public boolean generateNestedSubObject() {
+                if (nestedCovered == false) {
+                    nestedCovered = true;
                     return true;
                 }
 
@@ -50,9 +63,9 @@ public boolean generateSubObject() {
             }
 
             @Override
-            public boolean generateNestedObject() {
-                if (nestedCovered == false) {
-                    nestedCovered = true;
+            public boolean generateRegularSubObject() {
+                if (subObjectCovered == false) {
+                    subObjectCovered = true;
                     return true;
                 }
 
@@ -60,28 +73,37 @@ public boolean generateNestedObject() {
             }
 
             @Override
-            public int childFieldCount(int lowerBound, int upperBound) {
-                // Make sure to generate enough fields to go through all field types.
-                return 20;
+            public String generateFieldName() {
+                return "f" + generatedFields++;
             }
+        };
+
+        var dataSourceOverride = new DataSourceHandler() {
+            private int generatedFields = 0;
 
             @Override
-            public String fieldName(int lengthLowerBound, int lengthUpperBound) {
-                return "f" + generatedFields++;
+            public DataSourceResponse.ChildFieldGenerator handle(DataSourceRequest.ChildFieldGenerator request) {
+                return testChildFieldGenerator;
             }
 
             @Override
-            public FieldType fieldType() {
-                return FieldType.values()[generatedFields % FieldType.values().length];
+            public DataSourceResponse.FieldTypeGenerator handle(DataSourceRequest.FieldTypeGenerator request) {
+                return new DataSourceResponse.FieldTypeGenerator(() -> FieldType.values()[generatedFields++ % FieldType.values().length]);
             }
         };
 
-        var dataGenerator = new DataGenerator(DataGeneratorSpecification.builder().withArbitrary(testArbitrary).build());
+        var dataGenerator = new DataGenerator(
+            DataGeneratorSpecification.builder().withDataSourceHandlers(List.of(dataSourceOverride)).build()
+        );
 
         var mapping = XContentBuilder.builder(XContentType.JSON.xContent());
         dataGenerator.writeMapping(mapping);
 
         var mappingService = new MapperServiceTestCase() {
+            @Override
+            protected Collection<? extends Plugin> getPlugins() {
+                return List.of(new UnsignedLongMapperPlugin(), new MapperExtrasPlugin());
+            }
         }.createMapperService(mapping);
 
         var document = XContentBuilder.builder(XContentType.JSON.xContent());
@@ -92,71 +114,49 @@ public FieldType fieldType() {
 
     public void testDataGeneratorStressTest() throws IOException {
         // Let's generate 1000000 fields to test an extreme case (2 levels of objects + 1 leaf level with 100 fields per object).
-        var arbitrary = new Arbitrary() {
+        var testChildFieldGenerator = new DataSourceResponse.ChildFieldGenerator() {
             private int generatedFields = 0;
 
             @Override
-            public boolean generateSubObject() {
-                return true;
+            public int generateChildFieldCount() {
+                return 100;
             }
 
             @Override
-            public boolean generateNestedObject() {
+            public boolean generateNestedSubObject() {
                 return false;
             }
 
             @Override
-            public int childFieldCount(int lowerBound, int upperBound) {
-                return upperBound;
+            public boolean generateRegularSubObject() {
+                return true;
             }
 
             @Override
-            public String fieldName(int lengthLowerBound, int lengthUpperBound) {
+            public String generateFieldName() {
                 return "f" + generatedFields++;
             }
+        };
 
+        var dataSourceOverride = new DataSourceHandler() {
             @Override
-            public FieldType fieldType() {
-                return FieldType.LONG;
-            }
-
-            @Override
-            public long longValue() {
-                return 0;
-            }
-
-            @Override
-            public String stringValue(int lengthLowerBound, int lengthUpperBound) {
-                return "";
-            }
-
-            @Override
-            public boolean generateNullValue() {
-                return false;
-            }
-
-            @Override
-            public boolean generateArrayOfValues() {
-                return false;
-            }
-
-            @Override
-            public int valueArraySize() {
-                return 3;
+            public DataSourceResponse.ChildFieldGenerator handle(DataSourceRequest.ChildFieldGenerator request) {
+                return testChildFieldGenerator;
             }
 
             @Override
-            public boolean generateArrayOfObjects() {
-                return false;
+            public DataSourceResponse.ObjectArrayGenerator handle(DataSourceRequest.ObjectArrayGenerator request) {
+                return new DataSourceResponse.ObjectArrayGenerator(Optional::empty);
             }
 
             @Override
-            public int objectArraySize() {
-                return 3;
+            public DataSourceResponse.FieldTypeGenerator handle(DataSourceRequest.FieldTypeGenerator request) {
+                return new DataSourceResponse.FieldTypeGenerator(() -> FieldType.LONG);
             }
         };
+
         var dataGenerator = new DataGenerator(
-            DataGeneratorSpecification.builder().withArbitrary(arbitrary).withMaxFieldCountPerLevel(100).withMaxObjectDepth(2).build()
+            DataGeneratorSpecification.builder().withDataSourceHandlers(List.of(dataSourceOverride)).withMaxObjectDepth(2).build()
         );
 
         var mapping = XContentBuilder.builder(XContentType.JSON.xContent());
diff --git a/test/framework/src/test/java/org/elasticsearch/logsdb/datageneration/FieldValuesTests.java b/test/framework/src/test/java/org/elasticsearch/logsdb/datageneration/DefaultWrappersHandlerTests.java
similarity index 69%
rename from test/framework/src/test/java/org/elasticsearch/logsdb/datageneration/FieldValuesTests.java
rename to test/framework/src/test/java/org/elasticsearch/logsdb/datageneration/DefaultWrappersHandlerTests.java
index 5e6a405ba1f87..1ac6d117f0931 100644
--- a/test/framework/src/test/java/org/elasticsearch/logsdb/datageneration/FieldValuesTests.java
+++ b/test/framework/src/test/java/org/elasticsearch/logsdb/datageneration/DefaultWrappersHandlerTests.java
@@ -8,21 +8,23 @@
 
 package org.elasticsearch.logsdb.datageneration;
 
-import org.elasticsearch.logsdb.datageneration.arbitrary.RandomBasedArbitrary;
-import org.elasticsearch.logsdb.datageneration.fields.FieldValues;
+import org.elasticsearch.logsdb.datageneration.datasource.DataSourceRequest;
+import org.elasticsearch.logsdb.datageneration.datasource.DefaultWrappersHandler;
 import org.elasticsearch.test.ESTestCase;
 
 import java.util.List;
 import java.util.function.Supplier;
 
-public class FieldValuesTests extends ESTestCase {
+public class DefaultWrappersHandlerTests extends ESTestCase {
     public void testSanity() {
+        var sut = new DefaultWrappersHandler();
+
         Supplier<Object> values = () -> 100;
-        var arbitrary = new RandomBasedArbitrary();
+        var nulls = sut.handle(new DataSourceRequest.NullWrapper());
+        var arrays = sut.handle(new DataSourceRequest.ArrayWrapper());
+
+        var valuesWithNullsAndWrappedInArray = arrays.wrapper().compose(nulls.wrapper()).apply(values);
 
-        var valuesWithNullsAndWrappedInArray = FieldValues.injectNulls(arbitrary)
-            .andThen(FieldValues.wrappedInArray(arbitrary))
-            .apply(values);
         var value = valuesWithNullsAndWrappedInArray.get();
 
         if (value instanceof List<?> list) {

From 65c57b3e330b522a0a33887f65e6da023e66fa50 Mon Sep 17 00:00:00 2001
From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com>
Date: Fri, 2 Aug 2024 14:33:33 +1000
Subject: [PATCH 15/36] Mute org.elasticsearch.search.SearchServiceTests
 org.elasticsearch.search.SearchServiceTests #111529

---
 muted-tests.yml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/muted-tests.yml b/muted-tests.yml
index 4635bf9541acb..256a7b7ba6987 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -141,6 +141,8 @@ tests:
 - class: org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvPSeriesWeightedSumTests
   method: testEvaluateBlockWithoutNulls {TestCase=, }
   issue: https://github.com/elastic/elasticsearch/issues/111498
+- class: org.elasticsearch.search.SearchServiceTests
+  issue: https://github.com/elastic/elasticsearch/issues/111529
 
 # Examples:
 #

From 6ca3ac253a8d8171f228d9dbfd6c5c924239c226 Mon Sep 17 00:00:00 2001
From: Moritz Mack
Date: Fri, 2 Aug 2024 09:26:37 +0200
Subject: [PATCH 16/36] Track raw ingest and storage size separately to
 support updates by doc (#111179)

This PR starts tracking raw ingest and storage size separately for updates
by document. This is done capturing the ingest size when initially parsing
the update, and storage size when parsing the final, merged document.
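For illustration, a minimal metering decorator can look like the sketch
below. It is modeled on the test fixtures in this patch and is not the
production accounting; the class name and the one-unit-per-map() rule are
made up for the example:

    import org.elasticsearch.index.mapper.ParsedDocument;
    import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator;
    import org.elasticsearch.xcontent.FilterXContentParserWrapper;
    import org.elasticsearch.xcontent.XContentParser;

    import java.io.IOException;
    import java.util.Map;

    // Hypothetical example: wraps the parser used during document parsing and
    // counts one unit each time an object is materialized via map().
    class CountingParserDecorator implements XContentMeteringParserDecorator {
        private long units = 0;

        @Override
        public XContentParser decorate(XContentParser xContentParser) {
            return new FilterXContentParserWrapper(xContentParser) {
                @Override
                public Map<String, Object> map() throws IOException {
                    units++; // assumed accounting rule, for illustration only
                    return super.map();
                }
            };
        }

        @Override
        public ParsedDocument.DocumentSize meteredDocumentSize() {
            // reports the counted units as both ingested and stored size
            return new ParsedDocument.DocumentSize(units, units);
        }
    }

A caller decorates its parser with decorate(parser), parses as usual, and
reads meteredDocumentSize() once parsing completes; that is how IngestService
and DocumentParser consume the decorator in this change.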
Additionally this renames DocumentSizeObserver to XContentParserDecorator /
XContentMeteringParserDecorator for better reasoning about the code. More
renaming will have to follow.

---------

Co-authored-by: Przemyslaw Gomulka
---
 ...teringParserDecoratorWithPipelinesIT.java} | 22 ++++---
 ...=> XContentMeteringParserDecoratorIT.java} | 22 +++----
 .../org/elasticsearch/TransportVersions.java  |  1 +
 .../bulk/BulkPrimaryExecutionContext.java     | 10 ----
 .../action/bulk/TransportShardBulkAction.java |  8 +--
 .../bulk/TransportSimulateBulkAction.java     |  4 +-
 .../action/index/IndexRequest.java            | 37 +++++++-----
 .../action/update/UpdateHelper.java           | 10 ++--
 .../common/xcontent/XContentHelper.java       | 12 ++--
 .../index/mapper/DocumentParser.java          |  8 +--
 .../index/mapper/ParsedDocument.java          | 24 +++++---
 .../index/mapper/SourceToParse.java           | 16 +++---
 .../elasticsearch/ingest/IngestService.java   | 15 +++--
 .../internal/DocumentParsingProvider.java     |  4 +-
 .../internal/DocumentSizeAccumulator.java     |  2 +-
 .../internal/DocumentSizeObserver.java        | 57 -------------------
 .../XContentMeteringParserDecorator.java      | 31 ++++++++++
 .../internal/XContentParserDecorator.java     | 17 ++++++
 .../bulk/TransportShardBulkActionTests.java   |  6 +-
 .../index/IndexingSlowLogTests.java           | 12 ++--
 .../index/engine/InternalEngineTests.java     |  4 +-
 .../index/mapper/DynamicTemplatesTests.java   |  4 +-
 .../index/shard/RefreshListenersTests.java    |  4 +-
 .../index/translog/TranslogTests.java         |  4 +-
 .../ingest/IngestServiceTests.java            | 20 +++----
 .../index/engine/EngineTestCase.java          |  3 +-
 .../index/mapper/MapperServiceTestCase.java   |  4 +-
 27 files changed, 180 insertions(+), 181 deletions(-)
 rename modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/plugins/internal/{DocumentSizeObserverWithPipelinesIT.java => XContentMeteringParserDecoratorWithPipelinesIT.java} (85%)
 rename server/src/internalClusterTest/java/org/elasticsearch/plugins/internal/{DocumentSizeObserverIT.java => XContentMeteringParserDecoratorIT.java} (88%)
 delete mode 100644 server/src/main/java/org/elasticsearch/plugins/internal/DocumentSizeObserver.java
 create mode 100644 server/src/main/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecorator.java
 create mode 100644 server/src/main/java/org/elasticsearch/plugins/internal/XContentParserDecorator.java

diff --git a/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/plugins/internal/DocumentSizeObserverWithPipelinesIT.java b/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecoratorWithPipelinesIT.java
similarity index 85%
rename from modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/plugins/internal/DocumentSizeObserverWithPipelinesIT.java
rename to modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecoratorWithPipelinesIT.java
index 16a8013ae9c4a..7f0910ea5cc4d 100644
--- a/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/plugins/internal/DocumentSizeObserverWithPipelinesIT.java
+++ b/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecoratorWithPipelinesIT.java
@@ -14,6 +14,7 @@
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.ParsedDocument;
 import org.elasticsearch.ingest.common.IngestCommonPlugin;
 import org.elasticsearch.plugins.IngestPlugin;
 import org.elasticsearch.plugins.Plugin;
@@ -32,7 +33,7 @@ import static org.hamcrest.Matchers.equalTo;
 
 @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST)
-public class DocumentSizeObserverWithPipelinesIT extends ESIntegTestCase {
+public class XContentMeteringParserDecoratorWithPipelinesIT extends ESIntegTestCase {
 
     private static String TEST_INDEX_NAME = "test-index-name";
 
     // the assertions are done in plugin which is static and will be created by ES server.
@@ -90,13 +91,13 @@ public DocumentParsingProvider getDocumentParsingProvider() {
             // returns a static instance, because we want to assert that the wrapping is called only once
             return new DocumentParsingProvider() {
                 @Override
-                public DocumentSizeObserver newDocumentSizeObserver(DocWriteRequest<?> request) {
+                public XContentMeteringParserDecorator newMeteringParserDecorator(DocWriteRequest<?> request) {
                     if (request instanceof IndexRequest indexRequest && indexRequest.getNormalisedBytesParsed() > 0) {
                         long normalisedBytesParsed = indexRequest.getNormalisedBytesParsed();
                         providedFixedSize.set(normalisedBytesParsed);
-                        return new TestDocumentSizeObserver(normalisedBytesParsed);
+                        return new TestXContentMeteringParserDecorator(normalisedBytesParsed);
                     }
-                    return new TestDocumentSizeObserver(0L);
+                    return new TestXContentMeteringParserDecorator(0L);
                 }
 
                 @Override
@@ -111,17 +112,15 @@ public DocumentSizeReporter newDocumentSizeReporter(
         }
     }
 
-    public static class TestDocumentSizeObserver implements DocumentSizeObserver {
+    public static class TestXContentMeteringParserDecorator implements XContentMeteringParserDecorator {
         long mapCounter = 0;
-        long wrapperCounter = 0;
 
-        public TestDocumentSizeObserver(long mapCounter) {
+        public TestXContentMeteringParserDecorator(long mapCounter) {
             this.mapCounter = mapCounter;
         }
 
         @Override
-        public XContentParser wrapParser(XContentParser xContentParser) {
-            wrapperCounter++;
+        public XContentParser decorate(XContentParser xContentParser) {
             hasWrappedParser = true;
             return new FilterXContentParserWrapper(xContentParser) {
 
@@ -134,10 +133,9 @@ public Map<String, Object> map() throws IOException {
         }
 
         @Override
-        public long normalisedBytesParsed() {
-            return mapCounter;
+        public ParsedDocument.DocumentSize meteredDocumentSize() {
+            return new ParsedDocument.DocumentSize(mapCounter, 0);
         }
-
     }
 }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/plugins/internal/DocumentSizeObserverIT.java b/server/src/internalClusterTest/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecoratorIT.java
similarity index 88%
rename from server/src/internalClusterTest/java/org/elasticsearch/plugins/internal/DocumentSizeObserverIT.java
rename to server/src/internalClusterTest/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecoratorIT.java
index 7797371a2823b..16fb618e97dfc 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/plugins/internal/DocumentSizeObserverIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecoratorIT.java
@@ -34,7 +34,7 @@
 import static org.hamcrest.Matchers.equalTo;
 
 @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST)
-public class DocumentSizeObserverIT extends ESIntegTestCase {
+public class XContentMeteringParserDecoratorIT extends ESIntegTestCase {
 
     private static String TEST_INDEX_NAME = "test-index-name";
 
@@ -125,8 +125,8 @@ public TestDocumentParsingProviderPlugin() {}
     public DocumentParsingProvider getDocumentParsingProvider() {
         return new DocumentParsingProvider() {
             @Override
-            public DocumentSizeObserver newDocumentSizeObserver(DocWriteRequest<?> request) {
-                return new TestDocumentSizeObserver(0L);
+            public XContentMeteringParserDecorator newMeteringParserDecorator(DocWriteRequest<?> request) {
+                return new TestXContentMeteringParserDecorator(0L);
             }
 
             @Override
@@ -151,20 +151,23 @@ public TestDocumentSizeReporter(String indexName) {
 
         @Override
         public void onIndexingCompleted(ParsedDocument parsedDocument) {
-            COUNTER.addAndGet(parsedDocument.getDocumentSizeObserver().normalisedBytesParsed());
+            long delta = parsedDocument.getNormalizedSize().ingestedBytes();
+            if (delta > 0) {
+                COUNTER.addAndGet(delta);
+            }
             assertThat(indexName, equalTo(TEST_INDEX_NAME));
         }
     }
 
-    public static class TestDocumentSizeObserver implements DocumentSizeObserver {
+    public static class TestXContentMeteringParserDecorator implements XContentMeteringParserDecorator {
         long counter = 0;
 
-        public TestDocumentSizeObserver(long counter) {
+        public TestXContentMeteringParserDecorator(long counter) {
            this.counter = counter;
         }
 
         @Override
-        public XContentParser wrapParser(XContentParser xContentParser) {
+        public XContentParser decorate(XContentParser xContentParser) {
             hasWrappedParser = true;
             return new FilterXContentParserWrapper(xContentParser) {
 
@@ -177,9 +180,8 @@ public Token nextToken() throws IOException {
         }
 
         @Override
-        public long normalisedBytesParsed() {
-            return counter;
+        public ParsedDocument.DocumentSize meteredDocumentSize() {
+            return new ParsedDocument.DocumentSize(counter, counter);
         }
-
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java
index 7d1204d1a51c0..8d0c071225c45 100644
--- a/server/src/main/java/org/elasticsearch/TransportVersions.java
+++ b/server/src/main/java/org/elasticsearch/TransportVersions.java
@@ -181,6 +181,7 @@ static TransportVersion def(int id) {
     public static final TransportVersion SEGMENT_LEVEL_FIELDS_STATS = def(8_711_00_0);
     public static final TransportVersion ML_ADD_DETECTION_RULE_PARAMS = def(8_712_00_0);
     public static final TransportVersion FIX_VECTOR_SIMILARITY_INNER_HITS = def(8_713_00_0);
+    public static final TransportVersion INDEX_REQUEST_UPDATE_BY_DOC_ORIGIN = def(8_714_00_0);
 
     /*
      * STOP! READ THIS FIRST! No, really,
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkPrimaryExecutionContext.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkPrimaryExecutionContext.java
index ac3a6be70d29f..28eef30f9185d 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/BulkPrimaryExecutionContext.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkPrimaryExecutionContext.java
@@ -18,7 +18,6 @@
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.translog.Translog;
-import org.elasticsearch.plugins.internal.DocumentSizeObserver;
 
 import java.util.Arrays;
 import java.util.List;
@@ -63,7 +62,6 @@ enum ItemProcessingState {
     private BulkItemResponse executionResult;
     private int updateRetryCounter;
     private long noopMappingUpdateRetryForMappingVersion;
-    private DocumentSizeObserver documentSizeObserver = DocumentSizeObserver.EMPTY_INSTANCE;
 
     BulkPrimaryExecutionContext(BulkShardRequest request, IndexShard primary) {
         this.request = request;
@@ -369,12 +367,4 @@ private boolean assertInvariants(ItemProcessingState... expectedCurrentState) {
         }
         return true;
     }
-
-    public void setDocumentSizeObserver(DocumentSizeObserver documentSizeObserver) {
-        this.documentSizeObserver = documentSizeObserver;
-    }
-
-    public DocumentSizeObserver getDocumentSizeObserver() {
-        return documentSizeObserver;
-    }
 }
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java
index 56dd651f1611e..7b0538ac277c4 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java
@@ -59,7 +59,7 @@
 import org.elasticsearch.indices.SystemIndices;
 import org.elasticsearch.node.NodeClosedException;
 import org.elasticsearch.plugins.internal.DocumentParsingProvider;
-import org.elasticsearch.plugins.internal.DocumentSizeObserver;
+import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportRequestOptions;
 import org.elasticsearch.transport.TransportService;
@@ -364,16 +364,14 @@ static boolean executeBulkItemRequest(
         } else {
             final IndexRequest request = context.getRequestToExecute();
-            DocumentSizeObserver documentSizeObserver = documentParsingProvider.newDocumentSizeObserver(request);
-
-            context.setDocumentSizeObserver(documentSizeObserver);
+            XContentMeteringParserDecorator meteringParserDecorator = documentParsingProvider.newMeteringParserDecorator(request);
             final SourceToParse sourceToParse = new SourceToParse(
                 request.id(),
                 request.source(),
                 request.getContentType(),
                 request.routing(),
                 request.getDynamicTemplates(),
-                documentSizeObserver
+                meteringParserDecorator
             );
             result = primary.applyIndexOperationOnPrimary(
                 version,
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java
index c08ed6413a7a1..ce8c56fe91b9f 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java
@@ -31,7 +31,7 @@
 import org.elasticsearch.indices.SystemIndices;
 import org.elasticsearch.ingest.IngestService;
 import org.elasticsearch.ingest.SimulateIngestService;
-import org.elasticsearch.plugins.internal.DocumentSizeObserver;
+import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
@@ -122,7 +122,7 @@ private Exception validateMappings(IndexRequest request) {
             request.getContentType(),
             request.routing(),
             request.getDynamicTemplates(),
-            DocumentSizeObserver.EMPTY_INSTANCE
+            XContentMeteringParserDecorator.NOOP
         );
 
         ClusterState state = clusterService.state();
diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java
index 5463f9fec4d2a..61d610c9eda4e 100644
--- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java
@@ -38,7 +38,7 @@
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.ingest.IngestService;
-import org.elasticsearch.plugins.internal.DocumentSizeObserver;
+import org.elasticsearch.plugins.internal.XContentParserDecorator;
 import org.elasticsearch.xcontent.XContentBuilder;
 import org.elasticsearch.xcontent.XContentFactory;
 import org.elasticsearch.xcontent.XContentType;
@@ -147,6 +147,7 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
     private Object rawTimestamp;
     private long normalisedBytesParsed = -1;
     private boolean originatesFromUpdateByScript;
+    private boolean originatesFromUpdateByDoc;
 
     public IndexRequest(StreamInput in) throws IOException {
         this(null, in);
@@ -204,6 +205,12 @@ public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOExceptio
         } else {
             originatesFromUpdateByScript = false;
         }
+
+        if (in.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_DOC_ORIGIN)) {
+            originatesFromUpdateByDoc = in.readBoolean();
+        } else {
+            originatesFromUpdateByDoc = false;
+        }
     }
 
     public IndexRequest() {
@@ -407,8 +414,8 @@ public Map<String, Object> sourceAsMap() {
         return XContentHelper.convertToMap(source, false, contentType).v2();
     }
 
-    public Map<String, Object> sourceAsMap(DocumentSizeObserver documentSizeObserver) {
-        return XContentHelper.convertToMap(source, false, contentType, documentSizeObserver).v2();
+    public Map<String, Object> sourceAsMap(XContentParserDecorator parserDecorator) {
+        return XContentHelper.convertToMap(source, false, contentType, parserDecorator).v2();
     }
 
     /**
@@ -768,6 +775,10 @@ private void writeBody(StreamOutput out) throws IOException {
         if (out.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_SCRIPT_ORIGIN)) {
             out.writeBoolean(originatesFromUpdateByScript);
         }
+
+        if (out.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_DOC_ORIGIN)) {
+            out.writeBoolean(originatesFromUpdateByDoc);
+        }
     }
 
     @Override
@@ -931,15 +942,6 @@ public IndexRequest setNormalisedBytesParsed(long normalisedBytesParsed) {
         return this;
     }
 
-    /**
-     * when observing document size while parsing, this method indicates that this request should not be recorded.
-     * @return an index request
-     */
-    public IndexRequest noParsedBytesToReport() {
-        this.normalisedBytesParsed = 0;
-        return this;
-    }
-
     /**
      * Adds the pipeline to the list of executed pipelines, if listExecutedPipelines is true
      *
@@ -977,6 +979,15 @@ public IndexRequest setOriginatesFromUpdateByScript(boolean originatesFromUpdate
     }
 
     public boolean originatesFromUpdateByScript() {
-        return this.originatesFromUpdateByScript;
+        return originatesFromUpdateByScript;
+    }
+
+    public boolean originatesFromUpdateByDoc() {
+        return originatesFromUpdateByDoc;
+    }
+
+    public IndexRequest setOriginatesFromUpdateByDoc(boolean originatesFromUpdateByDoc) {
+        this.originatesFromUpdateByDoc = originatesFromUpdateByDoc;
+        return this;
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java
index 6b54654d7fbe9..056eb70c53269 100644
--- a/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java
+++ b/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java
@@ -27,7 +27,7 @@
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.plugins.internal.DocumentParsingProvider;
-import org.elasticsearch.plugins.internal.DocumentSizeObserver;
+import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator;
 import org.elasticsearch.script.Script;
 import org.elasticsearch.script.ScriptService;
 import org.elasticsearch.script.UpdateCtxMap;
@@ -181,14 +181,14 @@ static String calculateRouting(GetResult getResult, @Nullable IndexRequest updat
     Result prepareUpdateIndexRequest(ShardId shardId, UpdateRequest request, GetResult getResult, boolean detectNoop) {
         final IndexRequest currentRequest = request.doc();
         final String routing = calculateRouting(getResult, currentRequest);
-        final DocumentSizeObserver documentSizeObserver = documentParsingProvider.newDocumentSizeObserver(request);
+        final XContentMeteringParserDecorator meteringParserDecorator = documentParsingProvider.newMeteringParserDecorator(request);
         final Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(getResult.internalSourceRef(), true);
         final XContentType updateSourceContentType = sourceAndContent.v1();
         final Map<String, Object> updatedSourceAsMap = sourceAndContent.v2();
 
         final boolean noop = XContentHelper.update(
             updatedSourceAsMap,
-            currentRequest.sourceAsMap(documentSizeObserver),
+            currentRequest.sourceAsMap(meteringParserDecorator),
             detectNoop
         ) == false;
 
@@ -226,8 +226,8 @@ Result prepareUpdateIndexRequest(ShardId shardId, UpdateRequest request, GetResu
             .waitForActiveShards(request.waitForActiveShards())
             .timeout(request.timeout())
             .setRefreshPolicy(request.getRefreshPolicy())
-            .setNormalisedBytesParsed(documentSizeObserver.normalisedBytesParsed());
-
+            .setOriginatesFromUpdateByDoc(true);
+        finalIndexRequest.setNormalisedBytesParsed(meteringParserDecorator.meteredDocumentSize().ingestedBytes());
         return new Result(finalIndexRequest, DocWriteResponse.Result.UPDATED, updatedSourceAsMap, updateSourceContentType);
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java
index 9998cb55064e3..ea2d6e2d6e8e4 100644
--- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java
+++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java
@@ -20,7 +20,7 @@ import org.elasticsearch.core.CheckedFunction;
 import org.elasticsearch.core.Nullable;
 import org.elasticsearch.core.Tuple;
-import org.elasticsearch.plugins.internal.DocumentSizeObserver;
+import org.elasticsearch.plugins.internal.XContentParserDecorator;
 import org.elasticsearch.xcontent.DeprecationHandler;
 import org.elasticsearch.xcontent.NamedXContentRegistry;
 import org.elasticsearch.xcontent.ToXContent;
@@ -152,14 +152,14 @@ public static Tuple<XContentType, Map<String, Object>> convertToMap(
         BytesReference bytes,
         boolean ordered,
         XContentType xContentType,
-        DocumentSizeObserver documentSizeObserver
+        XContentParserDecorator parserDecorator
     ) {
         return parseToType(
             ordered ? XContentParser::mapOrdered : XContentParser::map,
             bytes,
             xContentType,
             XContentParserConfiguration.EMPTY,
-            documentSizeObserver
+            parserDecorator
         );
     }
 
@@ -207,7 +207,7 @@ public static <T> Tuple<XContentType, T> parseToType(
         @Nullable XContentType xContentType,
         @Nullable XContentParserConfiguration config
     ) throws ElasticsearchParseException {
-        return parseToType(extractor, bytes, xContentType, config, DocumentSizeObserver.EMPTY_INSTANCE);
+        return parseToType(extractor, bytes, xContentType, config, XContentParserDecorator.NOOP);
     }
 
     public static <T> Tuple<XContentType, T> parseToType(
@@ -215,11 +215,11 @@ public static <T> Tuple<XContentType, T> parseToType(
         BytesReference bytes,
         @Nullable XContentType xContentType,
         @Nullable XContentParserConfiguration config,
-        DocumentSizeObserver documentSizeObserver
+        XContentParserDecorator parserDecorator
     ) throws ElasticsearchParseException {
         config = config != null ? config : XContentParserConfiguration.EMPTY;
         try (
-            XContentParser parser = documentSizeObserver.wrapParser(
+            XContentParser parser = parserDecorator.decorate(
                 xContentType != null ? createParser(config, bytes, xContentType) : createParser(config, bytes)
             )
         ) {
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java
index 8bf7f3f4e72a3..aad8d5f6dfa2a 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java
@@ -23,7 +23,7 @@
 import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper;
 import org.elasticsearch.index.query.SearchExecutionContext;
 import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
-import org.elasticsearch.plugins.internal.DocumentSizeObserver;
+import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator;
 import org.elasticsearch.search.lookup.SearchLookup;
 import org.elasticsearch.search.lookup.Source;
 import org.elasticsearch.xcontent.XContentBuilder;
@@ -76,9 +76,9 @@ public ParsedDocument parseDocument(SourceToParse source, MappingLookup mappingL
         final RootDocumentParserContext context;
         final XContentType xContentType = source.getXContentType();
 
-        DocumentSizeObserver documentSizeObserver = source.getDocumentSizeObserver();
+        XContentMeteringParserDecorator meteringParserDecorator = source.getDocumentSizeObserver();
         try (
-            XContentParser parser = documentSizeObserver.wrapParser(
+            XContentParser parser = meteringParserDecorator.decorate(
                 XContentHelper.createParser(parserConfiguration, source.source(), xContentType)
             )
         ) {
@@ -106,7 +106,7 @@ public ParsedDocument parseDocument(SourceToParse source, MappingLookup mappingL
             context.sourceToParse().source(),
             context.sourceToParse().getXContentType(),
             dynamicUpdate,
-            documentSizeObserver
+            meteringParserDecorator.meteredDocumentSize()
         ) {
             @Override
             public String documentDescription() {
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java b/server/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java
index d500d42e45fae..8076c836cd0cb 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java
@@ -14,7 +14,6 @@
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.index.mapper.MapperService.MergeReason;
-import org.elasticsearch.plugins.internal.DocumentSizeObserver;
 import org.elasticsearch.xcontent.XContentType;
 
 import java.util.Collections;
@@ -24,7 +23,6 @@
  * The result of parsing a document.
 */
 public class ParsedDocument {
-
     private final Field version;
 
     private final String id;
@@ -34,7 +32,7 @@ public class ParsedDocument {
 
     private final List<LuceneDocument> documents;
 
-    private final DocumentSizeObserver documentSizeObserver;
+    private final DocumentSize normalizedSize;
 
     private BytesReference source;
     private XContentType xContentType;
@@ -62,7 +60,7 @@ public static ParsedDocument noopTombstone(String reason) {
             new BytesArray("{}"),
             XContentType.JSON,
             null,
-            DocumentSizeObserver.EMPTY_INSTANCE
+            DocumentSize.UNKNOWN
         );
     }
 
@@ -87,7 +85,7 @@ public static ParsedDocument deleteTombstone(String id) {
             new BytesArray("{}"),
             XContentType.JSON,
             null,
-            DocumentSizeObserver.EMPTY_INSTANCE
+            DocumentSize.UNKNOWN
         );
     }
 
@@ -100,7 +98,7 @@ public ParsedDocument(
         BytesReference source,
         XContentType xContentType,
         Mapping dynamicMappingsUpdate,
-        DocumentSizeObserver documentSizeObserver
+        DocumentSize normalizedSize
     ) {
         this.version = version;
         this.seqID = seqID;
@@ -110,7 +108,7 @@ public ParsedDocument(
         this.source = source;
         this.dynamicMappingsUpdate = dynamicMappingsUpdate;
         this.xContentType = xContentType;
-        this.documentSizeObserver = documentSizeObserver;
+        this.normalizedSize = normalizedSize;
     }
 
     public String id() {
@@ -179,8 +177,16 @@ public String documentDescription() {
         return "id";
     }
 
-    public DocumentSizeObserver getDocumentSizeObserver() {
-        return documentSizeObserver;
+    public DocumentSize getNormalizedSize() {
+        return normalizedSize;
     }
 
+    /**
+     * Normalized ingested and stored size of a document.
+     * @param ingestedBytes ingest size of the document
+     * @param storedBytes stored retained size of the document
+     */
+    public record DocumentSize(long ingestedBytes, long storedBytes) {
+        public static final DocumentSize UNKNOWN = new DocumentSize(-1, -1);
+    }
 }
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java
index 6a020127019f5..ddf92de647ae5 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java
@@ -11,7 +11,7 @@
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.core.Nullable;
-import org.elasticsearch.plugins.internal.DocumentSizeObserver;
+import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator;
 import org.elasticsearch.xcontent.XContentType;
 
 import java.util.Map;
@@ -28,7 +28,7 @@ public class SourceToParse {
     private final XContentType xContentType;
 
     private final Map<String, String> dynamicTemplates;
 
-    private final DocumentSizeObserver documentSizeObserver;
+    private final XContentMeteringParserDecorator meteringParserDecorator;
 
     public SourceToParse(
         @Nullable String id,
@@ -36,7 +36,7 @@ public SourceToParse(
         XContentType xContentType,
         @Nullable String routing,
         Map<String, String> dynamicTemplates,
-        DocumentSizeObserver documentSizeObserver
+        XContentMeteringParserDecorator meteringParserDecorator
     ) {
         this.id = id;
         // we always convert back to byte array, since we store it and Field only supports bytes..
@@ -45,15 +45,15 @@ public SourceToParse(
         this.xContentType = Objects.requireNonNull(xContentType);
         this.routing = routing;
         this.dynamicTemplates = Objects.requireNonNull(dynamicTemplates);
-        this.documentSizeObserver = documentSizeObserver;
+        this.meteringParserDecorator = meteringParserDecorator;
     }
 
     public SourceToParse(String id, BytesReference source, XContentType xContentType) {
-        this(id, source, xContentType, null, Map.of(), DocumentSizeObserver.EMPTY_INSTANCE);
+        this(id, source, xContentType, null, Map.of(), XContentMeteringParserDecorator.NOOP);
     }
 
     public SourceToParse(String id, BytesReference source, XContentType xContentType, String routing) {
-        this(id, source, xContentType, routing, Map.of(), DocumentSizeObserver.EMPTY_INSTANCE);
+        this(id, source, xContentType, routing, Map.of(), XContentMeteringParserDecorator.NOOP);
     }
 
     public BytesReference source() {
@@ -90,7 +90,7 @@ public XContentType getXContentType() {
         return this.xContentType;
     }
 
-    public DocumentSizeObserver getDocumentSizeObserver() {
-        return documentSizeObserver;
+    public XContentMeteringParserDecorator getDocumentSizeObserver() {
+        return meteringParserDecorator;
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestService.java b/server/src/main/java/org/elasticsearch/ingest/IngestService.java
index 5ab68960dff5b..dde30377df15b 100644
--- a/server/src/main/java/org/elasticsearch/ingest/IngestService.java
+++ b/server/src/main/java/org/elasticsearch/ingest/IngestService.java
@@ -61,7 +61,8 @@
 import org.elasticsearch.node.ReportingService;
 import org.elasticsearch.plugins.IngestPlugin;
 import org.elasticsearch.plugins.internal.DocumentParsingProvider;
-import org.elasticsearch.plugins.internal.DocumentSizeObserver;
+import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator;
+import org.elasticsearch.plugins.internal.XContentParserDecorator;
 import org.elasticsearch.script.ScriptService;
 import org.elasticsearch.threadpool.Scheduler;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -750,8 +751,10 @@ protected void doRun() {
                 }
                 final int slot = i;
                 final Releasable ref = refs.acquire();
-                final DocumentSizeObserver documentSizeObserver = documentParsingProvider.newDocumentSizeObserver(indexRequest);
-                final IngestDocument ingestDocument = newIngestDocument(indexRequest, documentSizeObserver);
+                final XContentMeteringParserDecorator meteringParserDecorator = documentParsingProvider.newMeteringParserDecorator(
+                    indexRequest
+                );
+                final IngestDocument ingestDocument = newIngestDocument(indexRequest, meteringParserDecorator);
                 final org.elasticsearch.script.Metadata originalDocumentMetadata = ingestDocument.getMetadata().clone();
                 // the document listener gives us three-way logic: a document can fail processing (1), or it can
                 // be successfully processed. a successfully processed document can be kept (2) or dropped (3).
@@ -792,7 +795,7 @@ public void onFailure(Exception e) {
                     );
 
                     executePipelines(pipelines, indexRequest, ingestDocument, shouldStoreFailure, documentListener);
-                    indexRequest.setNormalisedBytesParsed(documentSizeObserver.normalisedBytesParsed());
+                    indexRequest.setNormalisedBytesParsed(meteringParserDecorator.meteredDocumentSize().ingestedBytes());
                     assert actionRequest.index() != null;
 
                     i++;
@@ -1094,14 +1097,14 @@ static String getProcessorName(Processor processor) {
     /**
      * Builds a new ingest document from the passed-in index request.
     */
-    private static IngestDocument newIngestDocument(final IndexRequest request, DocumentSizeObserver documentSizeObserver) {
+    private static IngestDocument newIngestDocument(final IndexRequest request, XContentParserDecorator parserDecorator) {
         return new IngestDocument(
             request.index(),
             request.id(),
             request.version(),
             request.routing(),
             request.versionType(),
-            request.sourceAsMap(documentSizeObserver)
+            request.sourceAsMap(parserDecorator)
         );
     }
 
diff --git a/server/src/main/java/org/elasticsearch/plugins/internal/DocumentParsingProvider.java b/server/src/main/java/org/elasticsearch/plugins/internal/DocumentParsingProvider.java
index d29b893447be0..adfef008f2096 100644
--- a/server/src/main/java/org/elasticsearch/plugins/internal/DocumentParsingProvider.java
+++ b/server/src/main/java/org/elasticsearch/plugins/internal/DocumentParsingProvider.java
@@ -39,7 +39,7 @@ default DocumentSizeAccumulator createDocumentSizeAccumulator() {
     /**
      * @return an observer
     */
-    default DocumentSizeObserver newDocumentSizeObserver(DocWriteRequest<?> request) {
-        return DocumentSizeObserver.EMPTY_INSTANCE;
+    default XContentMeteringParserDecorator newMeteringParserDecorator(DocWriteRequest<?> request) {
+        return XContentMeteringParserDecorator.NOOP;
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/plugins/internal/DocumentSizeAccumulator.java b/server/src/main/java/org/elasticsearch/plugins/internal/DocumentSizeAccumulator.java
index 27bce3c637c65..6f351a3762217 100644
--- a/server/src/main/java/org/elasticsearch/plugins/internal/DocumentSizeAccumulator.java
+++ b/server/src/main/java/org/elasticsearch/plugins/internal/DocumentSizeAccumulator.java
@@ -13,7 +13,7 @@
 import java.util.Map;
 
 /**
- * An interface to allow accumulating results of document parsing (collected with {@link DocumentSizeObserver})
+ * An interface to allow accumulating results of document parsing (collected with {@link XContentParserDecorator})
 */
 public interface DocumentSizeAccumulator {
     DocumentSizeAccumulator EMPTY_INSTANCE = new DocumentSizeAccumulator() {
diff --git a/server/src/main/java/org/elasticsearch/plugins/internal/DocumentSizeObserver.java b/server/src/main/java/org/elasticsearch/plugins/internal/DocumentSizeObserver.java
deleted file mode 100644
index 386a90b65b60f..0000000000000
--- a/server/src/main/java/org/elasticsearch/plugins/internal/DocumentSizeObserver.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0 and the Server Side Public License, v 1; you may not use this file except
- * in compliance with, at your election, the Elastic License 2.0 or the Server
- * Side Public License, v 1.
- */
-
-package org.elasticsearch.plugins.internal;
-
-import org.elasticsearch.xcontent.XContentParser;
-
-/**
- * An interface to allow wrapping an XContentParser and observe the events emitted while parsing
- * A default implementation returns a noop DocumentSizeObserver
- */
-public interface DocumentSizeObserver {
-    /**
-     * a default noop implementation
-     */
-    DocumentSizeObserver EMPTY_INSTANCE = new DocumentSizeObserver() {
-        @Override
-        public XContentParser wrapParser(XContentParser xContentParser) {
-            return xContentParser;
-        }
-
-        @Override
-        public long normalisedBytesParsed() {
-            return 0;
-        }
-
-    };
-
-    /**
-     * Decorates a provided xContentParser with additional logic (gather some state).
-     *
-     * @param xContentParser to be decorated
-     * @return a decorator xContentParser
-     */
-    XContentParser wrapParser(XContentParser xContentParser);
-
-    /**
-     * Returns the state gathered during parsing
-     *
-     * @return a number representing a state parsed
-     */
-    long normalisedBytesParsed();
-
-    /**
-     * Indicates if an observer was used on an update request with script
-     *
-     * @return true if update was done by script, false otherwise
-     */
-    default boolean isUpdateByScript() {
-        return false;
-    }
-}
diff --git a/server/src/main/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecorator.java b/server/src/main/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecorator.java
new file mode 100644
index 0000000000000..4fce2a20415a3
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecorator.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.plugins.internal;
+
+import org.elasticsearch.index.mapper.ParsedDocument.DocumentSize;
+import org.elasticsearch.xcontent.XContentParser;
+
+public interface XContentMeteringParserDecorator extends XContentParserDecorator {
+    /**
+     * a default noop implementation
+     */
+    XContentMeteringParserDecorator NOOP = new XContentMeteringParserDecorator() {
+        @Override
+        public DocumentSize meteredDocumentSize() {
+            return DocumentSize.UNKNOWN;
+        }
+
+        @Override
+        public XContentParser decorate(XContentParser xContentParser) {
+            return xContentParser;
+        }
+    };
+
+    DocumentSize meteredDocumentSize();
+}
diff --git a/server/src/main/java/org/elasticsearch/plugins/internal/XContentParserDecorator.java b/server/src/main/java/org/elasticsearch/plugins/internal/XContentParserDecorator.java
new file mode 100644
index 0000000000000..fc34a486d4aa7
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/plugins/internal/XContentParserDecorator.java
@@ -0,0 +1,17 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.plugins.internal;
+
+import org.elasticsearch.xcontent.XContentParser;
+
+public interface XContentParserDecorator {
+    XContentParserDecorator NOOP = parser -> parser;
+
+    XContentParser decorate(XContentParser xContentParser);
+}
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java
index 1f54d8dd1edd5..b83b26321a3e7 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java
@@ -599,7 +599,7 @@ public void testUpdateRequestWithConflictFailure() throws Exception {
 
         IndexRequest updateResponse = new IndexRequest("index").id("id")
             .source(Requests.INDEX_CONTENT_TYPE, "field", "value")
-            .noParsedBytesToReport();// let's pretend this was modified by a script
+            .setNormalisedBytesParsed(0);// let's pretend this was modified by a script
 
         DocumentParsingProvider documentParsingProvider = mock(DocumentParsingProvider.class);
         Exception err = new VersionConflictEngineException(shardId, "id", "I'm conflicted <(;_;)>");
@@ -655,7 +655,7 @@ public void testUpdateRequestWithConflictFailure() throws Exception {
 
         // we have set 0 value on normalisedBytesParsed on the IndexRequest, like it happens with updates by script.
         ArgumentCaptor<IndexRequest> argument = ArgumentCaptor.forClass(IndexRequest.class);
-        verify(documentParsingProvider, times(retries + 1)).newDocumentSizeObserver(argument.capture());
+        verify(documentParsingProvider, times(retries + 1)).newMeteringParserDecorator(argument.capture());
         IndexRequest value = argument.getValue();
         assertThat(value.getNormalisedBytesParsed(), equalTo(0L));
     }
@@ -722,7 +722,7 @@ public void testUpdateRequestWithSuccess() throws Exception {
         assertThat(response.getSeqNo(), equalTo(13L));
 
         ArgumentCaptor<IndexRequest> argument = ArgumentCaptor.forClass(IndexRequest.class);
-        verify(documentParsingProvider, times(1)).newDocumentSizeObserver(argument.capture());
+        verify(documentParsingProvider, times(1)).newMeteringParserDecorator(argument.capture());
         IndexRequest value = argument.getValue();
         assertThat(value.getNormalisedBytesParsed(), equalTo(100L));
     }
diff --git a/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java b/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java
index c743a83208a24..75b63da0227ed 100644
--- a/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java
+++ b/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java
@@ -27,10 +27,10 @@
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.engine.EngineTestCase;
 import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.ParsedDocument.DocumentSize;
 import org.elasticsearch.index.mapper.SeqNoFieldMapper;
 import org.elasticsearch.index.mapper.Uid;
 import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.plugins.internal.DocumentSizeObserver;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.xcontent.XContentParseException;
 import org.elasticsearch.xcontent.XContentType;
@@ -211,7 +211,7 @@ public void testSlowLogMessageHasJsonFields() throws IOException {
             source,
             XContentType.JSON,
             null,
-            DocumentSizeObserver.EMPTY_INSTANCE
+            DocumentSize.UNKNOWN
         );
         Index index = new Index("foo", "123");
         // Turning off document logging doesn't log source[]
@@ -240,7 +240,7 @@ public void testSlowLogMessageHasAdditionalFields() throws IOException {
             source,
             XContentType.JSON,
             null,
-            DocumentSizeObserver.EMPTY_INSTANCE
+            DocumentSize.UNKNOWN
         );
         Index index = new Index("foo", "123");
         // Turning off document logging doesn't log source[]
@@ -270,7 +270,7 @@ public void testEmptyRoutingField() throws IOException {
             source,
             XContentType.JSON,
             null,
-            DocumentSizeObserver.EMPTY_INSTANCE
+            DocumentSize.UNKNOWN
         );
         Index index = new Index("foo", "123");
 
@@ -289,7 +289,7 @@ public void testSlowLogParsedDocumentPrinterSourceToLog() throws IOException {
             source,
             XContentType.JSON,
             null,
-            DocumentSizeObserver.EMPTY_INSTANCE
+            DocumentSize.UNKNOWN
         );
         Index index = new Index("foo", "123");
         // Turning off document logging doesn't log source[]
@@ -321,7 +321,7 @@ public void testSlowLogParsedDocumentPrinterSourceToLog() throws IOException {
             source,
             XContentType.JSON,
             null,
-            DocumentSizeObserver.EMPTY_INSTANCE
+            DocumentSize.UNKNOWN
         );
 
         final XContentParseException e = expectThrows(
diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
index cc636fb1bc995..77f5fa301c78d 100644
--- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
+++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
@@ -108,6 +108,7 @@
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.mapper.MappingLookup;
 import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.ParsedDocument.DocumentSize;
 import org.elasticsearch.index.mapper.SeqNoFieldMapper;
 import org.elasticsearch.index.mapper.SourceFieldMapper;
 import org.elasticsearch.index.mapper.Uid;
@@ -129,7 +130,6 @@
 import org.elasticsearch.index.translog.TranslogDeletionPolicy;
 import org.elasticsearch.index.translog.TranslogOperationsUtils;
 import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
-import org.elasticsearch.plugins.internal.DocumentSizeObserver;
 import org.elasticsearch.test.IndexSettingsModule;
 import org.elasticsearch.test.index.IndexVersionUtils;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -5510,7 +5510,7 @@ public void testSeqNoGenerator() throws IOException {
             source,
             XContentType.JSON,
             null,
-            DocumentSizeObserver.EMPTY_INSTANCE
+            DocumentSize.UNKNOWN
         );
 
         final Engine.Index index = new Engine.Index(
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java
index cd8a59a1a6ad5..61926d72982d8 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java
@@ -21,7 +21,7 @@
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.index.IndexVersion;
 import org.elasticsearch.index.IndexVersions;
-import org.elasticsearch.plugins.internal.DocumentSizeObserver;
+import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator;
 import org.elasticsearch.test.XContentTestUtils;
 import org.elasticsearch.test.index.IndexVersionUtils;
 import org.elasticsearch.xcontent.XContentBuilder;
@@ -739,7 +739,7 @@ public void testTemplateWithoutMatchPredicates() throws Exception {
                 XContentType.JSON,
                 null,
                 Map.of("foo", "geo_point"),
-                DocumentSizeObserver.EMPTY_INSTANCE
+                XContentMeteringParserDecorator.NOOP
             )
         );
         assertThat(doc.rootDoc().getFields("foo"), hasSize(2));
diff --git a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java
index 2b333277e2d4a..55bfc2480d4d6 100644
--- a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java
+++ b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java
@@ -44,6 +44,7 @@
 import org.elasticsearch.index.mapper.LuceneDocument;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.ParsedDocument.DocumentSize;
 import org.elasticsearch.index.mapper.SeqNoFieldMapper;
 import org.elasticsearch.index.mapper.Uid;
 import org.elasticsearch.index.seqno.RetentionLeases;
@@ -52,7 +53,6 @@
 import org.elasticsearch.index.translog.Translog;
 import org.elasticsearch.index.translog.TranslogConfig;
 import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
-import org.elasticsearch.plugins.internal.DocumentSizeObserver;
 import org.elasticsearch.test.DummyShardLock;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.IndexSettingsModule;
@@ -567,7 +567,7 @@ private Engine.IndexResult index(String id, String testFieldValue) throws IOExce
             source,
             XContentType.JSON,
             null,
-            DocumentSizeObserver.EMPTY_INSTANCE
+            DocumentSize.UNKNOWN
         );
         Engine.Index index = new Engine.Index(uid, engine.config().getPrimaryTermSupplier().getAsLong(), doc);
new Engine.Index(uid, engine.config().getPrimaryTermSupplier().getAsLong(), doc); return engine.index(index); diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 8a277e400ad6c..c0de7947dbb60 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -55,6 +55,7 @@ import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.LuceneDocument; import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.ParsedDocument.DocumentSize; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.seqno.LocalCheckpointTracker; @@ -62,7 +63,6 @@ import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog.Location; -import org.elasticsearch.plugins.internal.DocumentSizeObserver; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.test.TransportVersionUtils; @@ -3394,7 +3394,7 @@ public void testTranslogOpSerialization() throws Exception { B_1, XContentType.JSON, null, - DocumentSizeObserver.EMPTY_INSTANCE + DocumentSize.UNKNOWN ); Engine.Index eIndex = new Engine.Index( diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java index 5621ed468f557..bc81614c9e237 100644 --- a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java @@ -50,9 +50,10 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.plugins.internal.DocumentParsingProvider; -import org.elasticsearch.plugins.internal.DocumentSizeObserver; +import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator; import org.elasticsearch.script.MockScriptEngine; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptModule; @@ -1175,20 +1176,19 @@ public void testExecuteBulkRequestCallsDocumentSizeObserver() { AtomicInteger parsedValueWasUsed = new AtomicInteger(0); DocumentParsingProvider documentParsingProvider = new DocumentParsingProvider() { @Override - public DocumentSizeObserver newDocumentSizeObserver(DocWriteRequest request) { - return new DocumentSizeObserver() { + public XContentMeteringParserDecorator newMeteringParserDecorator(DocWriteRequest request) { + return new XContentMeteringParserDecorator() { @Override - public XContentParser wrapParser(XContentParser xContentParser) { - wrappedObserverWasUsed.incrementAndGet(); - return xContentParser; + public ParsedDocument.DocumentSize meteredDocumentSize() { + parsedValueWasUsed.incrementAndGet(); + return new ParsedDocument.DocumentSize(0, 0); } @Override - public long normalisedBytesParsed() { - parsedValueWasUsed.incrementAndGet(); - return 0; + public XContentParser decorate(XContentParser xContentParser) { + wrappedObserverWasUsed.incrementAndGet(); + return xContentParser; } - }; } }; diff --git 
a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index 290b4ac6dd3e3..f383a1aaab12d 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -99,7 +99,6 @@ import org.elasticsearch.index.translog.TranslogDeletionPolicy; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; -import org.elasticsearch.plugins.internal.DocumentSizeObserver; import org.elasticsearch.test.DummyShardLock; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; @@ -429,7 +428,7 @@ protected static ParsedDocument testParsedDocument( source, XContentType.JSON, mappingUpdate, - DocumentSizeObserver.EMPTY_INSTANCE + ParsedDocument.DocumentSize.UNKNOWN ); } diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java index b5a42efd67088..c5aa03d5548f6 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java @@ -61,7 +61,7 @@ import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.TelemetryPlugin; -import org.elasticsearch.plugins.internal.DocumentSizeObserver; +import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptCompiler; import org.elasticsearch.script.ScriptContext; @@ -383,7 +383,7 @@ protected static SourceToParse source( XContentType.JSON, routing, dynamicTemplates, - DocumentSizeObserver.EMPTY_INSTANCE + XContentMeteringParserDecorator.NOOP ); } From 08c6e6ba0fabc9fbed76ed187e0d71bc7669b9dd Mon Sep 17 00:00:00 2001 From: Pablo Machado Date: Fri, 2 Aug 2024 10:16:34 +0200 Subject: [PATCH 17/36] Fix broken MvPSeriesWeightedSum tests (#111488) * Fix broken MvPSeriesWeightedSum tests --- muted-tests.yml | 6 ------ .../multivalue/MvPSeriesWeightedSumTests.java | 15 ++++++++++----- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 256a7b7ba6987..aaec3345782e8 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -135,12 +135,6 @@ tests: - class: org.elasticsearch.xpack.restart.FullClusterRestartIT method: testDataStreams {cluster=UPGRADED} issue: https://github.com/elastic/elasticsearch/issues/111448 -- class: org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvPSeriesWeightedSumTests - method: testFold {TestCase=, } - issue: https://github.com/elastic/elasticsearch/issues/111479 -- class: org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvPSeriesWeightedSumTests - method: testEvaluateBlockWithoutNulls {TestCase=, } - issue: https://github.com/elastic/elasticsearch/issues/111498 - class: org.elasticsearch.search.SearchServiceTests issue: https://github.com/elastic/elasticsearch/issues/111529 diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPSeriesWeightedSumTests.java 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPSeriesWeightedSumTests.java index d7a2b530007ad..0f277485b874d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPSeriesWeightedSumTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPSeriesWeightedSumTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.hamcrest.Matcher; import java.util.ArrayList; import java.util.List; @@ -43,10 +44,10 @@ protected Expression build(Source source, List args) { } private static void doubles(List cases) { - - cases.add(new TestCaseSupplier(List.of(DataType.DOUBLE, DataType.DOUBLE), () -> { - List field = randomList(1, 10, () -> randomDouble()); - double p = randomDoubleBetween(-100.0, 100.0, true); + cases.add(new TestCaseSupplier("most common scenario", List.of(DataType.DOUBLE, DataType.DOUBLE), () -> { + List field = randomList(1, 10, () -> randomDoubleBetween(1, 10, false)); + double p = randomDoubleBetween(-10, 10, true); + double expectedResult = calcPSeriesWeightedSum(field, p); return new TestCaseSupplier.TestCase( List.of( @@ -55,11 +56,15 @@ private static void doubles(List cases) { ), "MvPSeriesWeightedSumDoubleEvaluator[block=Attribute[channel=0], p=" + p + "]", DataType.DOUBLE, - closeTo(calcPSeriesWeightedSum(field, p), 0.00000001) + match(expectedResult) ); })); } + private static Matcher match(Double value) { + return closeTo(value, Math.abs(value * .00000001)); + } + private static double calcPSeriesWeightedSum(List field, double p) { double sum = 0; for (int i = 0; i < field.size(); i++) { From c4235d56cfe91a4be478978c00a3d1a9cac812e3 Mon Sep 17 00:00:00 2001 From: Luigi Dell'Aquila Date: Fri, 2 Aug 2024 10:32:34 +0200 Subject: [PATCH 18/36] ES|QL: improve serialization of FieldAttributes (#111447) Attributes are shared multiple times in query plans. In addition, multiple FieldAttributes can share the same parent FieldAttribute. Send each Attribute instance only once; on subsequent occurrences, send only a serialization ID that is used to cache and retrieve the instance.
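The write side of the scheme can be sketched roughly as follows (a simplified illustration only, not the actual implementation; `cache` and `writeAttr` are placeholder names, while the real logic lives in PlanStreamOutput#writeAttributeCacheHeader and PlanStreamInput#readAttributeWithCache):

    // Sketch: deduplicating writer for attributes (placeholder names).
    static void writeAttr(Map<Attribute, Integer> cache, StreamOutput out, Attribute attr) throws IOException {
        Integer id = cache.get(attr);   // identity-based lookup in the real code
        if (id != null) {
            out.writeZLong(id);         // already sent: reference by id only
        } else {
            id = cache.size();
            cache.put(attr, id);
            out.writeZLong(-1 - id);    // first occurrence: negative marker...
            attr.writeTo(out);          // ...followed by the full attribute body
        }
    }

The reader mirrors this: a non-negative value is a cache lookup, while a negative value means "decode the attribute that follows and store it under id = -1 - value".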
--- .../org/elasticsearch/TransportVersions.java | 1 + .../esql/core/expression/FieldAttribute.java | 31 ++-- .../core/expression/MetadataAttribute.java | 27 ++- .../core/expression/ReferenceAttribute.java | 25 ++- .../xpack/esql/core/util/PlanStreamInput.java | 11 ++ .../esql/core/util/PlanStreamOutput.java | 24 +++ .../function/UnsupportedAttribute.java | 25 ++- .../xpack/esql/io/stream/PlanNamedTypes.java | 2 +- .../xpack/esql/io/stream/PlanStreamInput.java | 49 +++++ .../esql/io/stream/PlanStreamOutput.java | 61 ++++++- .../function/AbstractAttributeTestCase.java | 4 +- .../function/MetadataAttributeTests.java | 4 + .../function/UnsupportedAttributeTests.java | 4 + .../esql/io/stream/PlanStreamOutputTests.java | 172 +++++++++++++++++- 14 files changed, 394 insertions(+), 46 deletions(-) create mode 100644 x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamOutput.java diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 8d0c071225c45..cf501f5389408 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -182,6 +182,7 @@ static TransportVersion def(int id) { public static final TransportVersion ML_ADD_DETECTION_RULE_PARAMS = def(8_712_00_0); public static final TransportVersion FIX_VECTOR_SIMILARITY_INNER_HITS = def(8_713_00_0); public static final TransportVersion INDEX_REQUEST_UPDATE_BY_DOC_ORIGIN = def(8_714_00_0); + public static final TransportVersion ESQL_ATTRIBUTE_CACHED_SERIALIZATION = def(8_715_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/FieldAttribute.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/FieldAttribute.java index a3bc7ea621d8a..15578392c7f30 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/FieldAttribute.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/FieldAttribute.java @@ -15,6 +15,7 @@ import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.core.util.PlanStreamInput; +import org.elasticsearch.xpack.esql.core.util.PlanStreamOutput; import org.elasticsearch.xpack.esql.core.util.StringUtils; import java.io.IOException; @@ -36,7 +37,7 @@ public class FieldAttribute extends TypedAttribute { static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( Attribute.class, "FieldAttribute", - FieldAttribute::new + FieldAttribute::readFrom ); private final FieldAttribute parent; @@ -81,7 +82,7 @@ public FieldAttribute( this.field = field; } - public FieldAttribute(StreamInput in) throws IOException { + private FieldAttribute(StreamInput in) throws IOException { /* * The funny casting dance with `(StreamInput & PlanStreamInput) in` is required * because we're in esql-core here and the real PlanStreamInput is in @@ -92,7 +93,7 @@ public FieldAttribute(StreamInput in) throws IOException { */ this( Source.readFrom((StreamInput & PlanStreamInput) in), - in.readOptionalWriteable(FieldAttribute::new), + in.readOptionalWriteable(FieldAttribute::readFrom), in.readString(), DataType.readFrom(in), in.readNamedWriteable(EsField.class), @@ -105,15 +106,21 @@ public FieldAttribute(StreamInput in) throws IOException { @Override public 
void writeTo(StreamOutput out) throws IOException { - Source.EMPTY.writeTo(out); - out.writeOptionalWriteable(parent); - out.writeString(name()); - dataType().writeTo(out); - out.writeNamedWriteable(field); - out.writeOptionalString(qualifier()); - out.writeEnum(nullable()); - id().writeTo(out); - out.writeBoolean(synthetic()); + if (((PlanStreamOutput) out).writeAttributeCacheHeader(this)) { + Source.EMPTY.writeTo(out); + out.writeOptionalWriteable(parent); + out.writeString(name()); + dataType().writeTo(out); + out.writeNamedWriteable(field); + out.writeOptionalString(qualifier()); + out.writeEnum(nullable()); + id().writeTo(out); + out.writeBoolean(synthetic()); + } + } + + public static FieldAttribute readFrom(StreamInput in) throws IOException { + return ((PlanStreamInput) in).readAttributeWithCache(FieldAttribute::new); } @Override diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/MetadataAttribute.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/MetadataAttribute.java index fd7382b0098c9..7aa63d91aab09 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/MetadataAttribute.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/MetadataAttribute.java @@ -19,6 +19,7 @@ import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.util.PlanStreamInput; +import org.elasticsearch.xpack.esql.core.util.PlanStreamOutput; import java.io.IOException; import java.util.Map; @@ -33,7 +34,7 @@ public class MetadataAttribute extends TypedAttribute { static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( Attribute.class, "MetadataAttribute", - MetadataAttribute::new + MetadataAttribute::readFrom ); private static final Map> ATTRIBUTES_MAP = Map.of( @@ -72,7 +73,7 @@ public MetadataAttribute(Source source, String name, DataType dataType, boolean } @SuppressWarnings("unchecked") - public MetadataAttribute(StreamInput in) throws IOException { + private MetadataAttribute(StreamInput in) throws IOException { /* * The funny casting dance with `(StreamInput & PlanStreamInput) in` is required * because we're in esql-core here and the real PlanStreamInput is in @@ -95,14 +96,20 @@ public MetadataAttribute(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - Source.EMPTY.writeTo(out); - out.writeString(name()); - dataType().writeTo(out); - out.writeOptionalString(qualifier()); - out.writeEnum(nullable()); - id().writeTo(out); - out.writeBoolean(synthetic()); - out.writeBoolean(searchable); + if (((PlanStreamOutput) out).writeAttributeCacheHeader(this)) { + Source.EMPTY.writeTo(out); + out.writeString(name()); + dataType().writeTo(out); + out.writeOptionalString(qualifier()); + out.writeEnum(nullable()); + id().writeTo(out); + out.writeBoolean(synthetic()); + out.writeBoolean(searchable); + } + } + + public static MetadataAttribute readFrom(StreamInput in) throws IOException { + return ((PlanStreamInput) in).readAttributeWithCache(MetadataAttribute::new); } @Override diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/ReferenceAttribute.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/ReferenceAttribute.java index d9a70787a56ed..24bf95dcf660a 100644 --- 
a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/ReferenceAttribute.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/ReferenceAttribute.java @@ -13,6 +13,7 @@ import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.util.PlanStreamInput; +import org.elasticsearch.xpack.esql.core.util.PlanStreamOutput; import java.io.IOException; @@ -23,7 +24,7 @@ public class ReferenceAttribute extends TypedAttribute { static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( Attribute.class, "ReferenceAttribute", - ReferenceAttribute::new + ReferenceAttribute::readFrom ); public ReferenceAttribute(Source source, String name, DataType dataType) { @@ -43,7 +44,7 @@ public ReferenceAttribute( } @SuppressWarnings("unchecked") - public ReferenceAttribute(StreamInput in) throws IOException { + private ReferenceAttribute(StreamInput in) throws IOException { /* * The funny casting dance with `(StreamInput & PlanStreamInput) in` is required * because we're in esql-core here and the real PlanStreamInput is in @@ -65,13 +66,19 @@ public ReferenceAttribute(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - Source.EMPTY.writeTo(out); - out.writeString(name()); - dataType().writeTo(out); - out.writeOptionalString(qualifier()); - out.writeEnum(nullable()); - id().writeTo(out); - out.writeBoolean(synthetic()); + if (((PlanStreamOutput) out).writeAttributeCacheHeader(this)) { + Source.EMPTY.writeTo(out); + out.writeString(name()); + dataType().writeTo(out); + out.writeOptionalString(qualifier()); + out.writeEnum(nullable()); + id().writeTo(out); + out.writeBoolean(synthetic()); + } + } + + public static ReferenceAttribute readFrom(StreamInput in) throws IOException { + return ((PlanStreamInput) in).readAttributeWithCache(ReferenceAttribute::new); } @Override diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamInput.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamInput.java index df8fac06dd478..01a153feeb473 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamInput.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamInput.java @@ -7,6 +7,9 @@ package org.elasticsearch.xpack.esql.core.util; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.core.CheckedFunction; +import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.NameId; import org.elasticsearch.xpack.esql.core.tree.Source; @@ -33,4 +36,12 @@ public interface PlanStreamInput { * the same result. */ NameId mapNameId(long id) throws IOException; + + /** + * Reads an Attribute using the attribute cache. 
+ * @param constructor the constructor needed to build the actual attribute when read from the wire + * @return An attribute; this will generally be the same type as the provided constructor + * @throws IOException + */ + A readAttributeWithCache(CheckedFunction constructor) throws IOException; } diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamOutput.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamOutput.java new file mode 100644 index 0000000000000..cec68c06e492e --- /dev/null +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamOutput.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.core.util; + +import org.elasticsearch.xpack.esql.core.expression.Attribute; + +import java.io.IOException; + +public interface PlanStreamOutput { + + /** + * Writes a cache header for an {@link Attribute} and caches it if it is not already in the cache. + * In that case, the attribute will have to serialize itself into this stream immediately after this method call. + * @param attribute The attribute to serialize + * @return true if the attribute needs to serialize itself, false otherwise (ie. if already cached) + * @throws IOException + */ + boolean writeAttributeCacheHeader(Attribute attribute) throws IOException; +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttribute.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttribute.java index a553361f60a18..bf9c9eaa3c407 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttribute.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttribute.java @@ -21,6 +21,7 @@ import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.UnsupportedEsField; +import org.elasticsearch.xpack.esql.core.util.PlanStreamOutput; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import java.io.IOException; @@ -36,17 +37,17 @@ public final class UnsupportedAttribute extends FieldAttribute implements Unreso public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( Attribute.class, "UnsupportedAttribute", - UnsupportedAttribute::new + UnsupportedAttribute::readFrom ); public static final NamedWriteableRegistry.Entry NAMED_EXPRESSION_ENTRY = new NamedWriteableRegistry.Entry( NamedExpression.class, ENTRY.name, - UnsupportedAttribute::new + UnsupportedAttribute::readFrom ); public static final NamedWriteableRegistry.Entry EXPRESSION_ENTRY = new NamedWriteableRegistry.Entry( Expression.class, ENTRY.name, - UnsupportedAttribute::new + UnsupportedAttribute::readFrom ); private final String message; @@ -70,7 +71,7 @@ public UnsupportedAttribute(Source source, String name, UnsupportedEsField field this.message = customMessage == null ? 
errorMessage(qualifiedName(), field) : customMessage; } - public UnsupportedAttribute(StreamInput in) throws IOException { + private UnsupportedAttribute(StreamInput in) throws IOException { this( Source.readFrom((PlanStreamInput) in), in.readString(), @@ -82,11 +83,17 @@ public UnsupportedAttribute(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - Source.EMPTY.writeTo(out); - out.writeString(name()); - field().writeTo(out); - out.writeOptionalString(hasCustomMessage ? message : null); - id().writeTo(out); + if (((PlanStreamOutput) out).writeAttributeCacheHeader(this)) { + Source.EMPTY.writeTo(out); + out.writeString(name()); + field().writeTo(out); + out.writeOptionalString(hasCustomMessage ? message : null); + id().writeTo(out); + } + } + + public static UnsupportedAttribute readFrom(StreamInput in) throws IOException { + return ((PlanStreamInput) in).readAttributeWithCache(UnsupportedAttribute::new); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java index 934180e5b21de..aac1de4fadafb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java @@ -752,7 +752,7 @@ static void writeProject(PlanStreamOutput out, Project project) throws IOExcepti static EsQueryExec.FieldSort readFieldSort(PlanStreamInput in) throws IOException { return new EsQueryExec.FieldSort( - new FieldAttribute(in), + FieldAttribute.readFrom(in), in.readEnum(Order.OrderDirection.class), in.readEnum(Order.NullsPosition.class) ); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInput.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInput.java index 0633595a5796d..5a59d99125e47 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInput.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInput.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.esql.io.stream; +import org.apache.lucene.util.ArrayUtil; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; @@ -21,8 +23,10 @@ import org.elasticsearch.compute.data.DoubleBigArrayBlock; import org.elasticsearch.compute.data.IntBigArrayBlock; import org.elasticsearch.compute.data.LongBigArrayBlock; +import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.core.Releasables; import org.elasticsearch.xpack.esql.Column; +import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.NameId; import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanNamedReader; import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanReader; @@ -60,6 +64,8 @@ public NameId apply(long streamNameId) { private final Map cachedBlocks = new HashMap<>(); + private Attribute[] attributesCache = new Attribute[64]; + private final PlanNameRegistry registry; // hook for nameId, where can cache and map, for now just return a NameId of the same long value. 
@@ -206,4 +212,47 @@ static void throwOnNullOptionalRead(Class type) throws IOException { public NameId mapNameId(long l) { return nameIdFunction.apply(l); } + + /** + * @param constructor the constructor needed to build the actual attribute when read from the wire + * @throws IOException + */ + @Override + @SuppressWarnings("unchecked") + public A readAttributeWithCache(CheckedFunction constructor) throws IOException { + if (getTransportVersion().onOrAfter(TransportVersions.ESQL_ATTRIBUTE_CACHED_SERIALIZATION)) { + // it's safe to cast to int, since the max value for this is {@link PlanStreamOutput#MAX_SERIALIZED_ATTRIBUTES} + int cacheId = Math.toIntExact(readZLong()); + if (cacheId < 0) { + cacheId = -1 - cacheId; + Attribute result = constructor.apply(this); + cacheAttribute(cacheId, result); + return (A) result; + } else { + return (A) attributeFromCache(cacheId); + } + } else { + return constructor.apply(this); + } + } + + private Attribute attributeFromCache(int id) throws IOException { + if (attributesCache[id] == null) { + throw new IOException("Attribute ID not found in serialization cache [" + id + "]"); + } + return attributesCache[id]; + } + + /** + * Add an attribute to the cache, based on the serialization ID generated by {@link PlanStreamOutput} + * @param id The ID that will reference the attribute. Generated at serialization time + * @param attr The attribute to cache + */ + private void cacheAttribute(int id, Attribute attr) { + assert id >= 0; + if (id >= attributesCache.length) { + attributesCache = ArrayUtil.grow(attributesCache); + } + attributesCache[id] = attr; + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutput.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutput.java index 674476ec4f736..2dcd41abc9dd3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutput.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutput.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.esql.io.stream; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -19,6 +20,8 @@ import org.elasticsearch.compute.data.LongBigArrayBlock; import org.elasticsearch.core.Nullable; import org.elasticsearch.xpack.esql.Column; +import org.elasticsearch.xpack.esql.core.InvalidArgumentException; +import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanWriter; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.join.Join; @@ -34,7 +37,14 @@ * A customized stream output used to serialize ESQL physical plan fragments. Complements stream * output with methods that write plan nodes, Attributes, Expressions, etc. */ -public final class PlanStreamOutput extends StreamOutput { +public final class PlanStreamOutput extends StreamOutput implements org.elasticsearch.xpack.esql.core.util.PlanStreamOutput { + + /** + * max number of attributes that can be cached for serialization + *
+ * TODO should this be a cluster setting...? + */ + protected static final int MAX_SERIALIZED_ATTRIBUTES = 1_000_000; /** * Cache of written blocks. We use an {@link IdentityHashMap} for this @@ -44,6 +54,16 @@ public final class PlanStreamOutput extends StreamOutput { */ private final Map cachedBlocks = new IdentityHashMap<>(); + /** + * Cache for field attributes. + * Field attributes can be a significant part of the query execution plan, especially + * for queries like `from *`, which can have thousands of output columns. + * Attributes can be shared by many plan nodes (e.g. ExchangeSink output, Project output, EsRelation fields); + * in addition, multiple Attributes can share the same parent field. + * This cache allows sending each attribute only once; from the second occurrence on, only an id will be sent + */ + protected final Map cachedAttributes = new IdentityHashMap<>(); + private final StreamOutput delegate; private final PlanNameRegistry registry; @@ -51,16 +71,19 @@ public final class PlanStreamOutput extends StreamOutput { private int nextCachedBlock = 0; + private int maxSerializedAttributes; + public PlanStreamOutput(StreamOutput delegate, PlanNameRegistry registry, @Nullable EsqlConfiguration configuration) throws IOException { - this(delegate, registry, configuration, PlanNamedTypes::name); + this(delegate, registry, configuration, PlanNamedTypes::name, MAX_SERIALIZED_ATTRIBUTES); } public PlanStreamOutput( StreamOutput delegate, PlanNameRegistry registry, @Nullable EsqlConfiguration configuration, - Function, String> nameSupplier + Function, String> nameSupplier, + int maxSerializedAttributes ) throws IOException { this.delegate = delegate; this.registry = registry; @@ -72,6 +95,7 @@ public PlanStreamOutput( } } } + this.maxSerializedAttributes = maxSerializedAttributes; } public void writeLogicalPlanNode(LogicalPlan logicalPlan) throws IOException { @@ -158,6 +182,37 @@ public void writeCachedBlock(Block block) throws IOException { nextCachedBlock++; } + @Override + public boolean writeAttributeCacheHeader(Attribute attribute) throws IOException { + if (getTransportVersion().onOrAfter(TransportVersions.ESQL_ATTRIBUTE_CACHED_SERIALIZATION)) { + Integer cacheId = attributeIdFromCache(attribute); + if (cacheId != null) { + writeZLong(cacheId); + return false; + } + + cacheId = cacheAttribute(attribute); + writeZLong(-1 - cacheId); + } + return true; + } + + private Integer attributeIdFromCache(Attribute attr) { + return cachedAttributes.get(attr); + } + + private int cacheAttribute(Attribute attr) { + if (cachedAttributes.containsKey(attr)) { + throw new IllegalArgumentException("Attribute already present in the serialization cache [" + attr + "]"); + } + int id = cachedAttributes.size(); + if (id >= maxSerializedAttributes) { + throw new InvalidArgumentException("Limit of the number of serialized attributes exceeded [{}]", maxSerializedAttributes); + } + cachedAttributes.put(attr, id); + return id; + } + /** * The byte representing a {@link Block} sent for the first time.
The byte * will be followed by a {@link StreamOutput#writeVInt} encoded identifier diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAttributeTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAttributeTestCase.java index 17dcab2048eb1..9225c198afeea 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAttributeTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAttributeTestCase.java @@ -12,12 +12,14 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.esql.EsqlTestUtils; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; import org.elasticsearch.xpack.esql.session.EsqlConfigurationSerializationTests; import java.io.IOException; @@ -82,7 +84,7 @@ public static class ExtraAttribute implements Writeable { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeNamedWriteable(a); + new PlanStreamOutput(out, new PlanNameRegistry(), EsqlTestUtils.TEST_CFG).writeNamedWriteable(a); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/MetadataAttributeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/MetadataAttributeTests.java index 573af9c17bb1d..cf43a17361df5 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/MetadataAttributeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/MetadataAttributeTests.java @@ -16,6 +16,10 @@ public class MetadataAttributeTests extends AbstractAttributeTestCase { @Override protected MetadataAttribute create() { + return randomMetadataAttribute(); + } + + public static MetadataAttribute randomMetadataAttribute() { Source source = Source.EMPTY; String name = randomAlphaOfLength(5); DataType type = randomFrom(DataType.types()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttributeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttributeTests.java index e195f31664774..4ab2959b37d29 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttributeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttributeTests.java @@ -15,6 +15,10 @@ public class UnsupportedAttributeTests extends AbstractAttributeTestCase { @Override protected UnsupportedAttribute create() { + return randomUnsupportedAttribute(); + } + + public static UnsupportedAttribute randomUnsupportedAttribute() { String name = randomAlphaOfLength(5); UnsupportedEsField field = UnsupportedEsFieldTests.randomUnsupportedEsField(4); String customMessage = randomBoolean() ? 
null : randomAlphaOfLength(9); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutputTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutputTests.java index 00fb9d4943005..fe6e7fddc58c9 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutputTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutputTests.java @@ -18,17 +18,31 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TransportVersionUtils; import org.elasticsearch.xpack.esql.Column; +import org.elasticsearch.xpack.esql.core.InvalidArgumentException; +import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; +import org.elasticsearch.xpack.esql.core.expression.NameId; import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.core.type.EsField; +import org.elasticsearch.xpack.esql.expression.function.MetadataAttributeTests; +import org.elasticsearch.xpack.esql.expression.function.ReferenceAttributeTests; +import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; +import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttributeTests; import org.elasticsearch.xpack.esql.session.EsqlConfiguration; import org.elasticsearch.xpack.esql.session.EsqlConfigurationSerializationTests; import java.io.IOException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; import java.util.Map; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.sameInstance; public class PlanStreamOutputTests extends ESTestCase { @@ -113,6 +127,153 @@ public void testWriteBlockTwice() throws IOException { } } + public void testWriteAttributeMultipleTimes() throws IOException { + Attribute attribute = randomAttribute(); + EsqlConfiguration configuration = EsqlConfigurationSerializationTests.randomConfiguration(); + try ( + BytesStreamOutput out = new BytesStreamOutput(); + PlanStreamOutput planStream = new PlanStreamOutput(out, PlanNameRegistry.INSTANCE, configuration) + ) { + int occurrences = randomIntBetween(2, 150); + for (int i = 0; i < occurrences; i++) { + planStream.writeNamedWriteable(attribute); + } + int depth = 0; + Attribute parent = attribute; + while (parent != null) { + depth++; + parent = parent instanceof FieldAttribute f ? f.parent() : null; + } + assertThat(planStream.cachedAttributes.size(), is(depth)); + try (PlanStreamInput in = new PlanStreamInput(out.bytes().streamInput(), PlanNameRegistry.INSTANCE, REGISTRY, configuration)) { + Attribute first = in.readNamedWriteable(Attribute.class); + for (int i = 1; i < occurrences; i++) { + Attribute next = in.readNamedWriteable(Attribute.class); + assertThat(first, sameInstance(next)); + } + for (int i = 0; i < depth; i++) { + assertThat(first, equalTo(attribute)); + first = first instanceof FieldAttribute f ? f.parent() : null; + attribute = attribute instanceof FieldAttribute f ? 
f.parent() : null; + } + assertThat(first, is(nullValue())); + assertThat(attribute, is(nullValue())); + } + } + } + + public void testWriteMultipleAttributes() throws IOException { + EsqlConfiguration configuration = EsqlConfigurationSerializationTests.randomConfiguration(); + try ( + BytesStreamOutput out = new BytesStreamOutput(); + PlanStreamOutput planStream = new PlanStreamOutput(out, PlanNameRegistry.INSTANCE, configuration) + ) { + List attrs = new ArrayList<>(); + int occurrences = randomIntBetween(2, 300); + for (int i = 0; i < occurrences; i++) { + attrs.add(randomAttribute()); + } + + // send all the attributes, three times + for (int i = 0; i < 3; i++) { + for (Attribute attr : attrs) { + planStream.writeNamedWriteable(attr); + } + } + + try (PlanStreamInput in = new PlanStreamInput(out.bytes().streamInput(), PlanNameRegistry.INSTANCE, REGISTRY, configuration)) { + List readAttrs = new ArrayList<>(); + for (int i = 0; i < occurrences; i++) { + readAttrs.add(in.readNamedWriteable(Attribute.class)); + assertThat(readAttrs.get(i), equalTo(attrs.get(i))); + } + // two more times + for (int i = 0; i < 2; i++) { + for (int j = 0; j < occurrences; j++) { + Attribute attr = in.readNamedWriteable(Attribute.class); + assertThat(attr, sameInstance(readAttrs.get(j))); + } + } + } + } + } + + public void testWriteMultipleAttributesWithSmallCache() throws IOException { + EsqlConfiguration configuration = EsqlConfigurationSerializationTests.randomConfiguration(); + try ( + BytesStreamOutput out = new BytesStreamOutput(); + PlanStreamOutput planStream = new PlanStreamOutput(out, PlanNameRegistry.INSTANCE, configuration, PlanNamedTypes::name, 10) + ) { + expectThrows(InvalidArgumentException.class, () -> { + for (int i = 0; i <= 10; i++) { + planStream.writeNamedWriteable(randomAttribute()); + } + }); + } + } + + public void testWriteEqualAttributesDifferentID() throws IOException { + EsqlConfiguration configuration = EsqlConfigurationSerializationTests.randomConfiguration(); + try ( + BytesStreamOutput out = new BytesStreamOutput(); + PlanStreamOutput planStream = new PlanStreamOutput(out, PlanNameRegistry.INSTANCE, configuration) + ) { + + Attribute one = randomAttribute(); + Attribute two = one.withId(new NameId()); + + planStream.writeNamedWriteable(one); + planStream.writeNamedWriteable(two); + + try (PlanStreamInput in = new PlanStreamInput(out.bytes().streamInput(), PlanNameRegistry.INSTANCE, REGISTRY, configuration)) { + Attribute oneCopy = in.readNamedWriteable(Attribute.class); + Attribute twoCopy = in.readNamedWriteable(Attribute.class); + + assertThat(oneCopy, equalTo(one)); + assertThat(twoCopy, equalTo(two)); + + assertThat(oneCopy.id(), not(equalTo(twoCopy.id()))); + } + } + } + + public void testWriteDifferentAttributesSameID() throws IOException { + EsqlConfiguration configuration = EsqlConfigurationSerializationTests.randomConfiguration(); + try ( + BytesStreamOutput out = new BytesStreamOutput(); + PlanStreamOutput planStream = new PlanStreamOutput(out, PlanNameRegistry.INSTANCE, configuration) + ) { + + Attribute one = randomAttribute(); + Attribute two = randomAttribute().withId(one.id()); + + planStream.writeNamedWriteable(one); + planStream.writeNamedWriteable(two); + + try (PlanStreamInput in = new PlanStreamInput(out.bytes().streamInput(), PlanNameRegistry.INSTANCE, REGISTRY, configuration)) { + Attribute oneCopy = in.readNamedWriteable(Attribute.class); + Attribute twoCopy = in.readNamedWriteable(Attribute.class); + + assertThat(oneCopy, equalTo(one)); + 
assertThat(twoCopy, equalTo(two)); + + assertThat(oneCopy, not(equalTo(twoCopy))); + assertThat(oneCopy.id(), equalTo(twoCopy.id())); + } + } + + private static Attribute randomAttribute() { + return switch (randomInt(3)) { + case 0 -> PlanNamedTypesTests.randomFieldAttribute(); + case 1 -> ReferenceAttributeTests.randomReferenceAttribute(); + case 2 -> UnsupportedAttributeTests.randomUnsupportedAttribute(); + case 3 -> MetadataAttributeTests.randomMetadataAttribute(); + default -> throw new IllegalArgumentException(); + + }; + } + private EsqlConfiguration randomConfiguration(Map> tables) { return EsqlConfigurationSerializationTests.randomConfiguration("query_" + randomAlphaOfLength(1), tables); } @@ -133,5 +294,14 @@ private Column randomColumn() { BigArrays.NON_RECYCLING_INSTANCE ); - private static final NamedWriteableRegistry REGISTRY = new NamedWriteableRegistry(Block.getNamedWriteables()); + private static final NamedWriteableRegistry REGISTRY; + + static { + List writeables = new ArrayList<>(); + writeables.addAll(Block.getNamedWriteables()); + writeables.addAll(Attribute.getNamedWriteables()); + writeables.add(UnsupportedAttribute.ENTRY); + writeables.addAll(EsField.getNamedWriteables()); + REGISTRY = new NamedWriteableRegistry(new ArrayList<>(new HashSet<>(writeables))); + } } From d4f330fd10d99dbdc349c69d434e0d8ec8a31bf0 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Fri, 2 Aug 2024 18:56:15 +1000 Subject: [PATCH 19/36] Use milliseconds for time histogram metrics (#111502) LongHistogram's default base2 exponential aggregation is optimized for a latency range of 1ms to 100s. Hence we should record time metrics in milliseconds instead of micros. Relates: ES-9065 --- .../s3/S3BlobStoreRepositoryMetricsTests.java | 43 ++++++++++++++++--- .../repositories/s3/S3BlobStore.java | 21 ++++----- .../repositories/RepositoriesMetrics.java | 10 ++--- .../blobcache/BlobCacheMetrics.java | 4 +- .../shared/SharedBlobCacheService.java | 2 +- 5 files changed, 56 insertions(+), 24 deletions(-) diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java index 640293ecb80b0..31fa47fb7b196 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java @@ -36,8 +36,9 @@ import java.util.Map; import java.util.Queue; import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.TimeUnit; -import static org.elasticsearch.repositories.RepositoriesMetrics.HTTP_REQUEST_TIME_IN_MICROS_HISTOGRAM; +import static org.elasticsearch.repositories.RepositoriesMetrics.HTTP_REQUEST_TIME_IN_MILLIS_HISTOGRAM; import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_EXCEPTIONS_HISTOGRAM; import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_EXCEPTIONS_REQUEST_RANGE_NOT_SATISFIED_TOTAL; import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_EXCEPTIONS_TOTAL; @@ -52,6 +53,7 @@ import static org.elasticsearch.rest.RestStatus.TOO_MANY_REQUESTS; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.lessThanOrEqualTo; @SuppressForbidden(reason = "this test uses a HttpServer to
emulate an S3 endpoint") // Need to set up a new cluster for each test because cluster settings use randomized authentication settings @@ -98,6 +100,35 @@ private static BlobContainer getBlobContainer(String dataNodeName, String reposi return blobStoreRepository.blobStore().blobContainer(BlobPath.EMPTY.add(randomIdentifier())); } + public void testHttpRequestTimeCaptureInMilliseconds() throws IOException { + final String repository = createRepository(randomRepositoryName()); + final String dataNodeName = internalCluster().getNodeNameThat(DiscoveryNode::canContainData); + final TestTelemetryPlugin plugin = getPlugin(dataNodeName); + final OperationPurpose purpose = randomFrom(OperationPurpose.values()); + final BlobContainer blobContainer = getBlobContainer(dataNodeName, repository); + final String blobName = randomIdentifier(); + + long before = System.nanoTime(); + blobContainer.writeBlob(purpose, blobName, new BytesArray(randomBytes(between(10, 1000))), false); + long elapsed = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - before); + assertThat(getLongHistogramValue(plugin, HTTP_REQUEST_TIME_IN_MILLIS_HISTOGRAM, Operation.PUT_OBJECT), lessThanOrEqualTo(elapsed)); + + plugin.resetMeter(); + before = System.nanoTime(); + blobContainer.readBlob(purpose, blobName).close(); + elapsed = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - before); + assertThat(getLongHistogramValue(plugin, HTTP_REQUEST_TIME_IN_MILLIS_HISTOGRAM, Operation.GET_OBJECT), lessThanOrEqualTo(elapsed)); + + plugin.resetMeter(); + before = System.nanoTime(); + blobContainer.deleteBlobsIgnoringIfNotExists(purpose, Iterators.single(blobName)); + elapsed = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - before); + assertThat( + getLongHistogramValue(plugin, HTTP_REQUEST_TIME_IN_MILLIS_HISTOGRAM, Operation.DELETE_OBJECTS), + lessThanOrEqualTo(elapsed) + ); + } + public void testMetricsWithErrors() throws IOException { final String repository = createRepository(randomRepositoryName()); @@ -121,7 +152,7 @@ public void testMetricsWithErrors() throws IOException { assertThat(getLongCounterValue(plugin, METRIC_THROTTLES_TOTAL, Operation.PUT_OBJECT), equalTo(2L * batch)); assertThat(getLongHistogramValue(plugin, METRIC_EXCEPTIONS_HISTOGRAM, Operation.PUT_OBJECT), equalTo(batch)); assertThat(getLongHistogramValue(plugin, METRIC_THROTTLES_HISTOGRAM, Operation.PUT_OBJECT), equalTo(2L * batch)); - assertThat(getNumberOfMeasurements(plugin, HTTP_REQUEST_TIME_IN_MICROS_HISTOGRAM, Operation.PUT_OBJECT), equalTo(batch)); + assertThat(getNumberOfMeasurements(plugin, HTTP_REQUEST_TIME_IN_MILLIS_HISTOGRAM, Operation.PUT_OBJECT), equalTo(batch)); } // Get not found @@ -141,7 +172,7 @@ public void testMetricsWithErrors() throws IOException { assertThat(getLongCounterValue(plugin, METRIC_THROTTLES_TOTAL, Operation.GET_OBJECT), equalTo(batch)); assertThat(getLongHistogramValue(plugin, METRIC_EXCEPTIONS_HISTOGRAM, Operation.GET_OBJECT), equalTo(batch)); assertThat(getLongHistogramValue(plugin, METRIC_THROTTLES_HISTOGRAM, Operation.GET_OBJECT), equalTo(batch)); - assertThat(getNumberOfMeasurements(plugin, HTTP_REQUEST_TIME_IN_MICROS_HISTOGRAM, Operation.GET_OBJECT), equalTo(batch)); + assertThat(getNumberOfMeasurements(plugin, HTTP_REQUEST_TIME_IN_MILLIS_HISTOGRAM, Operation.GET_OBJECT), equalTo(batch)); // Make sure we don't hit the request range not satisfied counters assertThat(getLongCounterValue(plugin, METRIC_EXCEPTIONS_REQUEST_RANGE_NOT_SATISFIED_TOTAL, Operation.GET_OBJECT), equalTo(0L)); @@ -164,7 +195,7 @@ public void 
testMetricsWithErrors() throws IOException { assertThat(getLongCounterValue(plugin, METRIC_THROTTLES_TOTAL, Operation.LIST_OBJECTS), equalTo(5L * batch)); assertThat(getLongHistogramValue(plugin, METRIC_EXCEPTIONS_HISTOGRAM, Operation.LIST_OBJECTS), equalTo(batch)); assertThat(getLongHistogramValue(plugin, METRIC_THROTTLES_HISTOGRAM, Operation.LIST_OBJECTS), equalTo(5L * batch)); - assertThat(getNumberOfMeasurements(plugin, HTTP_REQUEST_TIME_IN_MICROS_HISTOGRAM, Operation.LIST_OBJECTS), equalTo(batch)); + assertThat(getNumberOfMeasurements(plugin, HTTP_REQUEST_TIME_IN_MILLIS_HISTOGRAM, Operation.LIST_OBJECTS), equalTo(batch)); } // Delete to clean up @@ -176,7 +207,7 @@ public void testMetricsWithErrors() throws IOException { assertThat(getLongCounterValue(plugin, METRIC_THROTTLES_TOTAL, Operation.DELETE_OBJECTS), equalTo(0L)); assertThat(getLongHistogramValue(plugin, METRIC_EXCEPTIONS_HISTOGRAM, Operation.DELETE_OBJECTS), equalTo(0L)); assertThat(getLongHistogramValue(plugin, METRIC_THROTTLES_HISTOGRAM, Operation.DELETE_OBJECTS), equalTo(0L)); - assertThat(getNumberOfMeasurements(plugin, HTTP_REQUEST_TIME_IN_MICROS_HISTOGRAM, Operation.DELETE_OBJECTS), equalTo(1L)); + assertThat(getNumberOfMeasurements(plugin, HTTP_REQUEST_TIME_IN_MILLIS_HISTOGRAM, Operation.DELETE_OBJECTS), equalTo(1L)); } public void testMetricsForRequestRangeNotSatisfied() { @@ -208,7 +239,7 @@ public void testMetricsForRequestRangeNotSatisfied() { ); assertThat(getLongCounterValue(plugin, METRIC_THROTTLES_TOTAL, Operation.GET_OBJECT), equalTo(2 * batch)); assertThat(getLongHistogramValue(plugin, METRIC_THROTTLES_HISTOGRAM, Operation.GET_OBJECT), equalTo(2 * batch)); - assertThat(getNumberOfMeasurements(plugin, HTTP_REQUEST_TIME_IN_MICROS_HISTOGRAM, Operation.GET_OBJECT), equalTo(batch)); + assertThat(getNumberOfMeasurements(plugin, HTTP_REQUEST_TIME_IN_MILLIS_HISTOGRAM, Operation.GET_OBJECT), equalTo(batch)); } } diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java index 5af53364fb765..fbf4767bd3e99 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java @@ -224,11 +224,11 @@ private void maybeRecordHttpRequestTime(Request request) { return; } - final long totalTimeInMicros = getTotalTimeInMicros(requestTimesIncludingRetries); - if (totalTimeInMicros == 0) { + final long totalTimeInMillis = getTotalTimeInMillis(requestTimesIncludingRetries); + if (totalTimeInMillis == 0) { logger.warn("Expected HttpRequestTime to be tracked for request [{}] but found no count.", request); } else { - s3RepositoriesMetrics.common().httpRequestTimeInMicroHistogram().record(totalTimeInMicros, attributes); + s3RepositoriesMetrics.common().httpRequestTimeInMillisHistogram().record(totalTimeInMillis, attributes); } } @@ -271,18 +271,19 @@ private static long getCountForMetric(TimingInfo info, AWSRequestMetrics.Field f } } - private static long getTotalTimeInMicros(List requestTimesIncludingRetries) { - // Here we calculate the timing in Microseconds for the sum of the individual subMeasurements with the goal of deriving the TTFB - // (time to first byte). We calculate the time in micros for later use with an APM style counter (exposed as a long), rather than - // using the default double exposed by getTimeTakenMillisIfKnown(). 
- long totalTimeInMicros = 0; + // Here we calculate the timing in Milliseconds for the sum of the individual subMeasurements with the goal of deriving the TTFB + // (time to first byte). We calculate the time in millis for later use with an APM style counter (exposed as a long), rather than + // using the default double exposed by getTimeTakenMillisIfKnown(). We don't need sub-millisecond precision. So there is no need to perform + // the data type castings. + long totalTimeInMillis = 0; for (TimingInfo timingInfo : requestTimesIncludingRetries) { var endTimeInNanos = timingInfo.getEndTimeNanoIfKnown(); if (endTimeInNanos != null) { - totalTimeInMicros += TimeUnit.NANOSECONDS.toMicros(endTimeInNanos - timingInfo.getStartTimeNano()); + totalTimeInMillis += TimeUnit.NANOSECONDS.toMillis(endTimeInNanos - timingInfo.getStartTimeNano()); } } - return totalTimeInMicros; + return totalTimeInMillis; } @Override diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesMetrics.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesMetrics.java index 85f06580cee79..02cc1a7458712 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesMetrics.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesMetrics.java @@ -22,7 +22,7 @@ public record RepositoriesMetrics( LongCounter unsuccessfulOperationCounter, LongHistogram exceptionHistogram, LongHistogram throttleHistogram, - LongHistogram httpRequestTimeInMicroHistogram + LongHistogram httpRequestTimeInMillisHistogram ) { public static RepositoriesMetrics NOOP = new RepositoriesMetrics(MeterRegistry.NOOP); @@ -36,7 +36,7 @@ public record RepositoriesMetrics( public static final String METRIC_UNSUCCESSFUL_OPERATIONS_TOTAL = "es.repositories.operations.unsuccessful.total"; public static final String METRIC_EXCEPTIONS_HISTOGRAM = "es.repositories.exceptions.histogram"; public static final String METRIC_THROTTLES_HISTOGRAM = "es.repositories.throttles.histogram"; - public static final String HTTP_REQUEST_TIME_IN_MICROS_HISTOGRAM = "es.repositories.requests.http_request_time.histogram"; + public static final String HTTP_REQUEST_TIME_IN_MILLIS_HISTOGRAM = "es.repositories.requests.http_request_time.histogram"; public RepositoriesMetrics(MeterRegistry meterRegistry) { this( @@ -54,9 +54,9 @@ public RepositoriesMetrics(MeterRegistry meterRegistry) { meterRegistry.registerLongHistogram(METRIC_EXCEPTIONS_HISTOGRAM, "repository request exception histogram", "unit"), meterRegistry.registerLongHistogram(METRIC_THROTTLES_HISTOGRAM, "repository request throttle histogram", "unit"), meterRegistry.registerLongHistogram( - HTTP_REQUEST_TIME_IN_MICROS_HISTOGRAM, - "HttpRequestTime in microseconds expressed as as a histogram", - "micros" + HTTP_REQUEST_TIME_IN_MILLIS_HISTOGRAM, + "HttpRequestTime in milliseconds expressed as a histogram", + "ms" ) ); } diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java index 7ca37f376045f..e92aa89022f35 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java @@ -31,8 +31,8 @@ public BlobCacheMetrics(MeterRegistry meterRegistry) { ), meterRegistry.registerLongHistogram( "es.blob_cache.cache_miss_load_times.histogram", -
"The time in microseconds for populating entries in the blob store resulting from a cache miss, expressed as a histogram.", - "micros" + "The time in milliseconds for populating entries in the blob store resulting from a cache miss, expressed as a histogram.", + "ms" ) ); } diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java index 9cb83e35b63d6..3d95db72e269d 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java @@ -1116,7 +1116,7 @@ public void fillCacheRange( IntConsumer progressUpdater ) throws IOException { writer.fillCacheRange(channel, channelPos, streamFactory, relativePos, length, progressUpdater); - var elapsedTime = TimeUnit.NANOSECONDS.toMicros(relativeTimeInNanosSupplier.getAsLong() - startTime); + var elapsedTime = TimeUnit.NANOSECONDS.toMillis(relativeTimeInNanosSupplier.getAsLong() - startTime); SharedBlobCacheService.this.blobCacheMetrics.getCacheMissLoadTimes().record(elapsedTime); SharedBlobCacheService.this.blobCacheMetrics.getCacheMissCounter().increment(); } From 02c494963a59610a0be07c7d54337017a1e5beaf Mon Sep 17 00:00:00 2001 From: Kathleen DeRusso Date: Fri, 2 Aug 2024 08:03:54 -0400 Subject: [PATCH 20/36] [Query rules] Add `exclude` query rule type (#111420) * Cleanup: Remove pinned IDs from applied rules in favor of single applied docs * Add support for query rules of type exclude, to exclude specified documents from result sets * Support exluded documents that specify the _index as well as the _id * Cleanup * Update docs/changelog/111420.yaml * Update docs * Spotless * PR feedback - docs updates * Apply PR feedback * PR feedback --------- Co-authored-by: Elastic Machine --- docs/changelog/111420.yaml | 5 + docs/reference/query-dsl/rule-query.asciidoc | 3 +- .../query-rules/apis/put-query-rule.asciidoc | 18 +- .../apis/put-query-ruleset.asciidoc | 16 +- .../search-using-query-rules.asciidoc | 17 +- .../entsearch/rules/10_query_ruleset_put.yml | 24 ++ .../entsearch/rules/20_query_ruleset_list.yml | 22 +- .../entsearch/rules/40_rule_query_search.yml | 245 +++++++++++++++++- .../entsearch/rules/50_query_rule_put.yml | 8 +- .../application/rules/AppliedQueryRules.java | 20 +- .../xpack/application/rules/QueryRule.java | 84 +++--- .../application/rules/RuleQueryBuilder.java | 121 ++++++--- .../application/rules/QueryRuleTests.java | 165 +++++++++++- .../PinnedQueryBuilderIT.java | 40 +-- .../PinnedQueryBuilder.java | 146 ++--------- .../SpecifiedDocument.java | 136 ++++++++++ .../PinnedQueryBuilderTests.java | 45 ++-- 17 files changed, 818 insertions(+), 297 deletions(-) create mode 100644 docs/changelog/111420.yaml create mode 100644 x-pack/plugin/search-business-rules/src/main/java/org/elasticsearch/xpack/searchbusinessrules/SpecifiedDocument.java diff --git a/docs/changelog/111420.yaml b/docs/changelog/111420.yaml new file mode 100644 index 0000000000000..4e2640ac5762a --- /dev/null +++ b/docs/changelog/111420.yaml @@ -0,0 +1,5 @@ +pr: 111420 +summary: "[Query rules] Add `exclude` query rule type" +area: Relevance +type: feature +issues: [] diff --git a/docs/reference/query-dsl/rule-query.asciidoc b/docs/reference/query-dsl/rule-query.asciidoc index cc5616c01eecd..dfedc2261bbde 100644 --- a/docs/reference/query-dsl/rule-query.asciidoc 
+++ b/docs/reference/query-dsl/rule-query.asciidoc
@@ -13,9 +13,10 @@ The old syntax using `rule_query` and `ruleset_id` is deprecated and will be rem
 ====

 Applies <> to the query before returning results.
-This feature is used to promote documents in the manner of a <> based on matching defined rules.
+Query rules can be used to promote documents in the manner of a <> based on matching defined rules, or to identify specific documents to exclude from a contextual result set.
 If no matching query rules are defined, the "organic" matches for the query are returned.
 All matching rules are applied in the order in which they appear in the query ruleset.
+If the same document matches both an `exclude` rule and a `pinned` rule, the document will be excluded.

 [NOTE]
 ====
diff --git a/docs/reference/query-rules/apis/put-query-rule.asciidoc b/docs/reference/query-rules/apis/put-query-rule.asciidoc
index 9737673be009c..714ed9b096d1d 100644
--- a/docs/reference/query-rules/apis/put-query-rule.asciidoc
+++ b/docs/reference/query-rules/apis/put-query-rule.asciidoc
@@ -26,7 +26,10 @@ Requires the `manage_search_query_rules` privilege.

 `type`::
 (Required, string) The type of rule.
-At this time only `pinned` query rule types are allowed.
+At this time the following query rule types are allowed:
+
+- `pinned` will identify and pin specific documents to the top of search results.
+- `exclude` will exclude specific documents from search results.

 `criteria`::
 (Required, array of objects) The criteria that must be met for the rule to be applied.
@@ -80,17 +83,18 @@ Required for all criteria types except `always`.
 The format of this action depends on the rule type.

 Actions depend on the rule type.
-For `pinned` rules, actions follow the format specified by the <>.
-The following actions are allowed:
+The following actions are allowed for `pinned` or `exclude` rules:

-- `ids` (Optional, array of strings) The unique <> of the documents to pin.
+- `ids` (Optional, array of strings) The unique <> of the documents to apply the rule to.
 Only one of `ids` or `docs` may be specified, and at least one must be specified.
-- `docs` (Optional, array of objects) The documents to pin.
+- `docs` (Optional, array of objects) The documents to apply the rule to.
 Only one of `ids` or `docs` may be specified, and at least one must be specified.
+There is a maximum of 100 documents per rule.
 You can specify the following attributes for each document:
 +
 --
-- `_index` (Required, string) The index of the document to pin.
+- `_index` (Optional, string) The index of the document.
+If null, all documents with the specified `_id` will be affected across all searched indices.
 - `_id` (Required, string) The unique <>.
 --
@@ -104,7 +108,7 @@ If multiple matching rules pin more than 100 documents, only the first 100 docum

 The following example creates a new query rule with the ID `my-rule1` in a query ruleset called `my-ruleset`.

-`my-rule1` will pin documents with IDs `id1` and `id2` when `user_query` contains `pugs` _or_ `puggles` **and** `user_country` exactly matches `us`.
+- `my-rule1` will select documents to promote with IDs `id1` and `id2` when `user_query` contains `pugs` _or_ `puggles` **and** `user_country` exactly matches `us`.
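Since the diff context elides the body of this example request, here is a minimal sketch of what `my-rule1` could look like via the Put Query Rule endpoint; the criteria values mirror the description above, but treat the exact body as illustrative rather than the canonical example from the source file:

[source,console]
----
PUT _query_rules/my-ruleset/_rule/my-rule1
{
  "type": "pinned",
  "criteria": [
    {
      "type": "contains",
      "metadata": "user_query",
      "values": [ "pugs", "puggles" ]
    },
    {
      "type": "exact",
      "metadata": "user_country",
      "values": [ "us" ]
    }
  ],
  "actions": {
    "ids": [ "id1", "id2" ]
  }
}
----
// TEST[skip:TBD]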
[source,console]
----
diff --git a/docs/reference/query-rules/apis/put-query-ruleset.asciidoc b/docs/reference/query-rules/apis/put-query-ruleset.asciidoc
index c164e9e140a4e..df7ec100db076 100644
--- a/docs/reference/query-rules/apis/put-query-ruleset.asciidoc
+++ b/docs/reference/query-rules/apis/put-query-ruleset.asciidoc
@@ -34,7 +34,7 @@ Each rule must have the following information:
 - `rule_id` (Required, string) A unique identifier for this rule.
 - `type` (Required, string) The type of rule.
-At this time only `pinned` query rule types are allowed.
+At this time only `pinned` and `exclude` query rule types are allowed.
 - `criteria` (Required, array of objects) The criteria that must be met for the rule to be applied.
 If multiple criteria are specified for a rule, all criteria must be met for the rule to be applied.
 - `actions` (Required, object) The actions to take when the rule is matched.
@@ -84,13 +84,13 @@ Only one value must match for the criteria to be met.
 Required for all criteria types except `always`.

 Actions depend on the rule type.
-For `pinned` rules, actions follow the format specified by the <>.
-The following actions are allowed:
+The following actions are allowed for `pinned` or `exclude` rules:

-- `ids` (Optional, array of strings) The unique <> of the documents to pin.
+- `ids` (Optional, array of strings) The unique <> of the documents to apply the rule to.
 Only one of `ids` or `docs` may be specified, and at least one must be specified.
-- `docs` (Optional, array of objects) The documents to pin.
+- `docs` (Optional, array of objects) The documents to apply the rule to.
 Only one of `ids` or `docs` may be specified, and at least one must be specified.
+There is a maximum of 100 documents per rule.
 You can specify the following attributes for each document:
 +
 --
@@ -98,7 +98,7 @@ You can specify the following attributes for each document:
 - `_id` (Required, string) The unique <>.
 --

-IMPORTANT: Due to limitations within <>, you can only pin documents using `ids` or `docs`, but cannot use both in single rule.
+IMPORTANT: Due to limitations within <>, you can only select documents using `ids` or `docs`, but cannot use both in a single rule.
 It is advised to use one or the other in query rulesets, to avoid errors.
 Additionally, pinned queries have a maximum limit of 100 pinned hits.
 If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.
@@ -111,7 +111,7 @@ The following example creates a new query ruleset called `my-ruleset`.
 Two rules are associated with `my-ruleset`:

 - `my-rule1` will pin documents with IDs `id1` and `id2` when `user_query` contains `pugs` _or_ `puggles` **and** `user_country` exactly matches `us`.
-- `my-rule2` will pin documents from different, specified indices with IDs `id3` and `id4` when the `query_string` fuzzily matches `rescue dogs`.
+- `my-rule2` will exclude documents from different, specified indices with IDs `id3` and `id4` when the `query_string` fuzzily matches `rescue dogs`.
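The hunk that follows shows only the changed fragment of this example. A full ruleset request along these lines would express both rules together (a sketch; the `my-index-000001` and `my-index-000002` index names are assumed for illustration, mirroring the related example in search-using-query-rules.asciidoc):

[source,console]
----
PUT _query_rules/my-ruleset
{
  "rules": [
    {
      "rule_id": "my-rule1",
      "type": "pinned",
      "criteria": [
        {
          "type": "contains",
          "metadata": "user_query",
          "values": [ "pugs", "puggles" ]
        },
        {
          "type": "exact",
          "metadata": "user_country",
          "values": [ "us" ]
        }
      ],
      "actions": {
        "ids": [ "id1", "id2" ]
      }
    },
    {
      "rule_id": "my-rule2",
      "type": "exclude",
      "criteria": [
        {
          "type": "fuzzy",
          "metadata": "query_string",
          "values": [ "rescue dogs" ]
        }
      ],
      "actions": {
        "docs": [
          { "_index": "my-index-000001", "_id": "id3" },
          { "_index": "my-index-000002", "_id": "id4" }
        ]
      }
    }
  ]
}
----
// TEST[skip:TBD]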
[source,console] ---- @@ -142,7 +142,7 @@ PUT _query_rules/my-ruleset }, { "rule_id": "my-rule2", - "type": "pinned", + "type": "exclude", "criteria": [ { "type": "fuzzy", diff --git a/docs/reference/search/search-your-data/search-using-query-rules.asciidoc b/docs/reference/search/search-your-data/search-using-query-rules.asciidoc index 594c22fb65981..18be825d02376 100644 --- a/docs/reference/search/search-your-data/search-using-query-rules.asciidoc +++ b/docs/reference/search/search-your-data/search-using-query-rules.asciidoc @@ -37,9 +37,10 @@ When defining a rule, consider the following: ===== Rule type The type of rule we want to apply. -For the moment there is a single rule type: +We support the following rule types: * `pinned` will re-write the query into a <>, pinning specified results matching the query rule at the top of the returned result set. +* `exclude` will exclude specified results from the returned result set. [discrete] [[query-rule-criteria]] @@ -91,12 +92,11 @@ Allowed criteria types are: The actions to take when the rule matches a query: -* `ids` will pin the specified <>s. -* `docs` will pin the specified documents in the specified indices. +* `ids` will select the specified <>s. +* `docs` will select the specified documents in the specified indices. Use `ids` when searching over a single index, and `docs` when searching over multiple indices. `ids` and `docs` cannot be combined in the same query. -See <> for details. [discrete] [[add-query-rules]] @@ -105,10 +105,10 @@ See <> for details. You can add query rules using the <> call. This adds a ruleset containing one or more query rules that will be applied to queries that match their specified criteria. -The following command will create a query ruleset called `my-ruleset` with two pinned document rules: +The following command will create a query ruleset called `my-ruleset` with two query rules: * The first rule will generate a <> pinning the <>s `id1` and `id2` when the `query_string` metadata value is a fuzzy match to either `puggles` or `pugs` _and_ the user's location is in the US. -* The second rule will generate a <> pinning the <> of `id3` specifically from the `my-index-000001` index and `id4` from the `my-index-000002` index when the `query_string` metadata value contains `beagles`. +* The second rule will generate a query that excludes the <> `id3` specifically from the `my-index-000001` index and `id4` from the `my-index-000002` index when the `query_string` metadata value contains `beagles`. //// [source,console] @@ -147,7 +147,7 @@ PUT /_query_rules/my-ruleset }, { "rule_id": "rule2", - "type": "pinned", + "type": "exclude", "criteria": [ { "type": "contains", @@ -222,7 +222,8 @@ This rule query will match against `rule1` in the defined query ruleset, and wil Any other matches from the organic query will be returned below the pinned results. It's possible to have multiple rules in a ruleset match a single <>. 
-In this case, the pinned documents are returned in the following order: +In this case, the rules are applied in the following order: - Where the matching rule appears in the ruleset - If multiple documents are specified in a single rule, in the order they are specified +- If a document is matched by both a `pinned` rule and an `exclude` rule, the `exclude` rule will take precedence diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/10_query_ruleset_put.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/10_query_ruleset_put.yml index a1f9eeccf2002..ee94cff46d1fb 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/10_query_ruleset_put.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/10_query_ruleset_put.yml @@ -44,6 +44,18 @@ teardown: '_id': 'id3' - '_index': 'test-index2' '_id': 'id4' + - rule_id: query-rule-id3 + type: exclude + criteria: + - type: exact + metadata: query_string + values: [ logstash ] + actions: + docs: + - '_index': 'test-index1' + '_id': 'id4' + - '_index': 'test-index2' + '_id': 'id5' - match: { result: 'created' } @@ -75,6 +87,18 @@ teardown: '_id': 'id3' - '_index': 'test-index2' '_id': 'id4' + - rule_id: query-rule-id3 + type: exclude + criteria: + - type: exact + metadata: query_string + values: [ logstash ] + actions: + docs: + - '_index': 'test-index1' + '_id': 'id4' + - '_index': 'test-index2' + '_id': 'id5' --- 'Create Query Ruleset - Resource already exists': diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/20_query_ruleset_list.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/20_query_ruleset_list.yml index f2ced956b5369..172d38cce5384 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/20_query_ruleset_list.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/20_query_ruleset_list.yml @@ -109,6 +109,16 @@ setup: ids: - 'id7' - 'id8' + - rule_id: query-rule-id5 + type: exclude + criteria: + - type: fuzzy + metadata: query_string + values: [ inference ] + actions: + ids: + - 'id9' + - 'id10' --- teardown: - do: @@ -144,8 +154,8 @@ teardown: - match: { results.0.rule_criteria_types_counts: { exact: 3 } } - match: { results.1.ruleset_id: "test-query-ruleset-2" } - - match: { results.1.rule_total_count: 4 } - - match: { results.1.rule_criteria_types_counts: { exact: 4 } } + - match: { results.1.rule_total_count: 5 } + - match: { results.1.rule_criteria_types_counts: { exact: 4, fuzzy: 1 } } - match: { results.2.ruleset_id: "test-query-ruleset-3" } - match: { results.2.rule_total_count: 2 } @@ -161,8 +171,8 @@ teardown: # Alphabetical order by ruleset_id for results - match: { results.0.ruleset_id: "test-query-ruleset-2" } - - match: { results.0.rule_total_count: 4 } - - match: { results.0.rule_criteria_types_counts: { exact: 4 } } + - match: { results.0.rule_total_count: 5 } + - match: { results.0.rule_criteria_types_counts: { exact: 4, fuzzy: 1 } } - match: { results.1.ruleset_id: "test-query-ruleset-3" } - match: { results.1.rule_total_count: 2 } @@ -182,8 +192,8 @@ teardown: - match: { results.0.rule_criteria_types_counts: { exact: 3 } } - match: { results.1.ruleset_id: "test-query-ruleset-2" } - - match: { 
results.1.rule_total_count: 4 } - - match: { results.1.rule_criteria_types_counts: { exact: 4 } } + - match: { results.1.rule_total_count: 5 } + - match: { results.1.rule_criteria_types_counts: { exact: 4, fuzzy: 1 } } --- "List Query Rulesets - empty": diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/40_rule_query_search.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/40_rule_query_search.yml index 078e24d86f1c8..845cdb7f9ac19 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/40_rule_query_search.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/40_rule_query_search.yml @@ -11,6 +11,19 @@ setup: index: number_of_shards: 1 number_of_replicas: 0 + aliases: + test-alias1: { } + + - do: + indices.create: + index: test-index2 + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + aliases: + test-alias1: { } - do: bulk: @@ -38,6 +51,22 @@ setup: - index: _id: doc7 - { "text": "observability" } + - index: + _id: doc8 + - { "text": "elasticsearch" } + + - do: + bulk: + refresh: true + index: test-index2 + body: + - index: + _id: another-doc + - { "text": "you know, for search" } + - index: + _id: doc8 + - { "text": "elasticsearch" } + - do: query_rules.put_ruleset: @@ -82,6 +111,15 @@ setup: actions: ids: - 'doc7' + - rule_id: rule5 + type: exclude + criteria: + - type: exact + metadata: query_string + values: [ search ] + actions: + ids: + - 'doc8' - do: query_rules.put_ruleset: @@ -115,6 +153,21 @@ teardown: ruleset_id: combined-ruleset ignore: 404 + - do: + query_rules.delete_ruleset: + ruleset_id: alias-ruleset + ignore: 404 + + - do: + query_rules.delete_ruleset: + ruleset_id: double-jeopardy-ruleset + ignore: 404 + + - do: + query_rules.delete_ruleset: + ruleset_id: multiple-exclude-ruleset + ignore: 404 + --- "Perform a rule query specifying a ruleset that does not exist": - do: @@ -165,7 +218,7 @@ teardown: foo: bar --- -"Perform a rule query with malformed rule": +"Perform a search with malformed rule query": - do: catch: bad_request search: @@ -184,6 +237,7 @@ teardown: - do: search: + index: test-index1 body: query: rule: @@ -208,6 +262,7 @@ teardown: - do: headers: { Authorization: "Basic ZW50c2VhcmNoLXVzZXI6ZW50c2VhcmNoLXVzZXItcGFzc3dvcmQ=" } # user search: + index: test-index1 body: query: rule: @@ -294,6 +349,7 @@ teardown: - do: search: + index: test-index1 body: query: rule: @@ -310,6 +366,190 @@ teardown: - match: { hits.hits.0._id: 'doc2' } - match: { hits.hits.1._id: 'doc3' } +--- +"Perform a query over an alias, where one document is pinned specifying the index": + - do: + query_rules.put_ruleset: + ruleset_id: alias-ruleset + body: + rules: + - rule_id: rule1 + type: pinned + criteria: + - type: exact + metadata: foo + values: [ bar ] + actions: + docs: + - '_index': 'test-index1' + '_id': 'doc8' + - rule_id: rule2 + type: exclude + criteria: + - type: exact + metadata: foo + values: [ baz ] + actions: + docs: + - '_index': 'test-index1' + '_id': 'doc8' + + - do: + search: + body: + query: + rule: + organic: + match_none: { } + match_criteria: + foo: bar + ruleset_ids: + - alias-ruleset + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: 'doc8' } + - match: { hits.hits.0._index: 'test-index1' } + + - do: + search: + body: + query: + rule: + organic: + query_string: + query: elasticsearch + match_criteria: + foo: baz + 
ruleset_ids: + - alias-ruleset + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: 'doc8' } + - match: { hits.hits.0._index: 'test-index2' } + + - do: + search: + body: + query: + rule: + organic: + query_string: + query: elasticsearch + match_criteria: + foo: not-a-match + ruleset_ids: + - alias-ruleset + + - match: { hits.total.value: 2 } + +--- +"Perform a query where the same ID is both pinned and excluded, leading it to be excluded": + - do: + query_rules.put_ruleset: + ruleset_id: double-jeopardy-ruleset + body: + rules: + - rule_id: rule1 + type: pinned + criteria: + - type: exact + metadata: foo + values: [ bar ] + actions: + ids: + - 'doc8' + - rule_id: rule2 + type: exclude + criteria: + - type: exact + metadata: foo + values: [ bar ] + actions: + ids: + - 'doc8' + + - do: + search: + index: test-index2 + body: + query: + query_string: + query: elasticsearch + + - match: { hits.total.value: 1 } + + - do: + search: + index: test-index2 + body: + query: + rule: + organic: + query_string: + query: elasticsearch + match_criteria: + foo: bar + ruleset_ids: + - double-jeopardy-ruleset + + - match: { hits.total.value: 0 } + +--- +"Perform a query that matches multiple exclude rules": + - do: + query_rules.put_ruleset: + ruleset_id: multiple-exclude-ruleset + body: + rules: + - rule_id: rule1 + type: exclude + criteria: + - type: exact + metadata: foo + values: [ bar ] + actions: + ids: + - 'doc1' + - rule_id: rule2 + type: exclude + criteria: + - type: exact + metadata: foo + values: [ bar ] + actions: + ids: + - 'doc8' + + - do: + search: + index: test-index1 + body: + query: + query_string: + query: elasticsearch is elastic search + + - match: { hits.total.value: 3 } + - match: { hits.hits.0._id: 'doc1' } + - match: { hits.hits.1._id: 'doc8' } + - match: { hits.hits.2._id: 'doc4' } + + - do: + search: + index: test-index1 + body: + query: + rule: + organic: + query_string: + query: elasticsearch is elastic search + match_criteria: + foo: bar + ruleset_ids: + - multiple-exclude-ruleset + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: 'doc4' } + --- "Perform a rule query over a ruleset with combined numeric and text rule matching": @@ -509,6 +749,7 @@ teardown: - do: search: + index: test-index1 body: query: rule: @@ -634,6 +875,7 @@ teardown: - do: search: + index: test-index1 body: query: rule_query: @@ -651,4 +893,3 @@ teardown: - match: { hits.total.value: 2 } - match: { hits.hits.0._id: 'doc1' } - match: { hits.hits.1._id: 'doc4' } - diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/50_query_rule_put.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/50_query_rule_put.yml index fb3d7be9d2367..285ae77e1f47f 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/50_query_rule_put.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/50_query_rule_put.yml @@ -101,7 +101,7 @@ teardown: - 'id1' - 'id2' - rule_id: query-rule-id2 - type: pinned + type: exclude criteria: - type: exact metadata: query_string @@ -122,7 +122,7 @@ teardown: - match: rules: - rule_id: query-rule-id2 - type: pinned + type: exclude criteria: - type: exact metadata: query_string @@ -169,7 +169,7 @@ teardown: - match: rules: - rule_id: query-rule-id2 - type: pinned + type: exclude criteria: - type: exact metadata: query_string @@ -225,7 +225,7 @@ teardown: - match: rules: - 
rule_id: query-rule-id2 - type: pinned + type: exclude criteria: - type: exact metadata: query_string diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/AppliedQueryRules.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/AppliedQueryRules.java index 96ad782a06f17..885b84b200e04 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/AppliedQueryRules.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/AppliedQueryRules.java @@ -7,31 +7,31 @@ package org.elasticsearch.xpack.application.rules; +import org.elasticsearch.xpack.searchbusinessrules.SpecifiedDocument; + import java.util.ArrayList; import java.util.List; -import static org.elasticsearch.xpack.searchbusinessrules.PinnedQueryBuilder.Item; - public class AppliedQueryRules { - private final List pinnedIds; - private final List pinnedDocs; + private final List pinnedDocs; + private final List excludedDocs; public AppliedQueryRules() { this(new ArrayList<>(0), new ArrayList<>(0)); } - public AppliedQueryRules(List pinnedIds, List pinnedDocs) { - this.pinnedIds = pinnedIds; + public AppliedQueryRules(List pinnedDocs, List excludedDocs) { this.pinnedDocs = pinnedDocs; + this.excludedDocs = excludedDocs; } - public List pinnedIds() { - return pinnedIds; + public List pinnedDocs() { + return pinnedDocs; } - public List pinnedDocs() { - return pinnedDocs; + public List excludedDocs() { + return excludedDocs; } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRule.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRule.java index 33fa74e5178cf..0ecb35531ac09 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRule.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRule.java @@ -23,7 +23,7 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; -import org.elasticsearch.xpack.searchbusinessrules.PinnedQueryBuilder; +import org.elasticsearch.xpack.searchbusinessrules.SpecifiedDocument; import java.io.IOException; import java.util.ArrayList; @@ -31,14 +31,11 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.Set; import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; import static org.elasticsearch.xpack.application.rules.QueryRuleCriteriaType.ALWAYS; -import static org.elasticsearch.xpack.searchbusinessrules.PinnedQueryBuilder.DOCS_FIELD; -import static org.elasticsearch.xpack.searchbusinessrules.PinnedQueryBuilder.IDS_FIELD; -import static org.elasticsearch.xpack.searchbusinessrules.PinnedQueryBuilder.Item.INDEX_FIELD; -import static org.elasticsearch.xpack.searchbusinessrules.PinnedQueryBuilder.MAX_NUM_PINNED_HITS; /** * A query rule consists of: @@ -51,6 +48,11 @@ */ public class QueryRule implements Writeable, ToXContentObject { + public static final int MAX_NUM_DOCS_IN_RULE = 100; + public static final ParseField IDS_FIELD = new ParseField("ids"); + public static final ParseField DOCS_FIELD = new ParseField("docs"); + public static final ParseField INDEX_FIELD = new ParseField("_index"); + private final String id; private final 
QueryRuleType type; private final List criteria; @@ -61,6 +63,7 @@ public class QueryRule implements Writeable, ToXContentObject { public static final int MAX_PRIORITY = 1000000; public enum QueryRuleType { + EXCLUDE, PINNED; public static QueryRuleType queryRuleType(String type) { @@ -137,32 +140,31 @@ public QueryRule(StreamInput in) throws IOException { } private void validate() { - if (type == QueryRuleType.PINNED) { - boolean ruleContainsPinnedIds = actions.containsKey(IDS_FIELD.getPreferredName()); - boolean ruleContainsPinnedDocs = actions.containsKey(DOCS_FIELD.getPreferredName()); - if (ruleContainsPinnedIds ^ ruleContainsPinnedDocs) { - validatePinnedAction(actions.get(IDS_FIELD.getPreferredName())); - validatePinnedAction(actions.get(DOCS_FIELD.getPreferredName())); - } else { - throw new ElasticsearchParseException("pinned query rule actions must contain only one of either ids or docs"); - } - } else { - throw new IllegalArgumentException("Unsupported QueryRuleType: " + type); - } if (priority != null && (priority < MIN_PRIORITY || priority > MAX_PRIORITY)) { throw new IllegalArgumentException("Priority was " + priority + ", must be between " + MIN_PRIORITY + " and " + MAX_PRIORITY); } + + if (Set.of(QueryRuleType.PINNED, QueryRuleType.EXCLUDE).contains(type)) { + boolean ruleContainsIds = actions.containsKey(IDS_FIELD.getPreferredName()); + boolean ruleContainsDocs = actions.containsKey(DOCS_FIELD.getPreferredName()); + if (ruleContainsIds ^ ruleContainsDocs) { + validateIdOrDocAction(actions.get(IDS_FIELD.getPreferredName())); + validateIdOrDocAction(actions.get(DOCS_FIELD.getPreferredName())); + } else { + throw new ElasticsearchParseException(type.toString() + " query rule actions must contain only one of either ids or docs"); + } + } } - private void validatePinnedAction(Object action) { + private void validateIdOrDocAction(Object action) { if (action != null) { if (action instanceof List == false) { - throw new ElasticsearchParseException("pinned query rule actions must be a list"); + throw new ElasticsearchParseException(type + " query rule actions must be a list"); } else if (((List) action).isEmpty()) { - throw new ElasticsearchParseException("pinned query rule actions cannot be empty"); - } else if (((List) action).size() > MAX_NUM_PINNED_HITS) { - throw new ElasticsearchParseException("pinned hits cannot exceed " + MAX_NUM_PINNED_HITS); + throw new ElasticsearchParseException(type + " query rule actions cannot be empty"); + } else if (((List) action).size() > MAX_NUM_DOCS_IN_RULE) { + throw new ElasticsearchParseException(type + " documents cannot exceed " + MAX_NUM_DOCS_IN_RULE); } } } @@ -316,14 +318,22 @@ public String toString() { return Strings.toString(this); } - @SuppressWarnings("unchecked") public AppliedQueryRules applyRule(AppliedQueryRules appliedRules, Map matchCriteria) { - if (type != QueryRule.QueryRuleType.PINNED) { - throw new UnsupportedOperationException("Only pinned query rules are supported"); + List pinnedDocs = appliedRules.pinnedDocs(); + List excludedDocs = appliedRules.excludedDocs(); + List matchingDocs = identifyMatchingDocs(matchCriteria); + + switch (type) { + case PINNED -> pinnedDocs.addAll(matchingDocs); + case EXCLUDE -> excludedDocs.addAll(matchingDocs); + default -> throw new IllegalStateException("Unsupported query rule type: " + type); } + return new AppliedQueryRules(pinnedDocs, excludedDocs); + } - List matchingPinnedIds = new ArrayList<>(); - List matchingPinnedDocs = new ArrayList<>(); + @SuppressWarnings("unchecked") + 
private List identifyMatchingDocs(Map matchCriteria) { + List matchingDocs = new ArrayList<>(); Boolean isRuleMatch = null; // All specified criteria in a rule must match for the rule to be applied @@ -342,25 +352,23 @@ public AppliedQueryRules applyRule(AppliedQueryRules appliedRules, Map) actions.get(IDS_FIELD.getPreferredName())); + matchingDocs.addAll( + ((List) actions.get(IDS_FIELD.getPreferredName())).stream().map(id -> new SpecifiedDocument(null, id)).toList() + ); } else if (actions.containsKey(DOCS_FIELD.getPreferredName())) { List> docsToPin = (List>) actions.get(DOCS_FIELD.getPreferredName()); - List items = docsToPin.stream() + List specifiedDocuments = docsToPin.stream() .map( - map -> new PinnedQueryBuilder.Item( + map -> new SpecifiedDocument( map.get(INDEX_FIELD.getPreferredName()), - map.get(PinnedQueryBuilder.Item.ID_FIELD.getPreferredName()) + map.get(SpecifiedDocument.ID_FIELD.getPreferredName()) ) ) .toList(); - matchingPinnedDocs.addAll(items); + matchingDocs.addAll(specifiedDocuments); } } - - List pinnedIds = appliedRules.pinnedIds(); - List pinnedDocs = appliedRules.pinnedDocs(); - pinnedIds.addAll(matchingPinnedIds); - pinnedDocs.addAll(matchingPinnedDocs); - return new AppliedQueryRules(pinnedIds, pinnedDocs); + return matchingDocs; } + } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/RuleQueryBuilder.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/RuleQueryBuilder.java index 80fbedc2aa7af..a3703a5005979 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/RuleQueryBuilder.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/RuleQueryBuilder.java @@ -20,8 +20,12 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.HeaderWarning; +import org.elasticsearch.index.mapper.IdFieldMapper; +import org.elasticsearch.index.mapper.IndexFieldMapper; import org.elasticsearch.index.query.AbstractQueryBuilder; +import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.license.LicenseUtils; @@ -32,7 +36,7 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.searchbusinessrules.PinnedQueryBuilder; -import org.elasticsearch.xpack.searchbusinessrules.PinnedQueryBuilder.Item; +import org.elasticsearch.xpack.searchbusinessrules.SpecifiedDocument; import java.io.IOException; import java.util.List; @@ -68,8 +72,8 @@ public class RuleQueryBuilder extends AbstractQueryBuilder { private final Map matchCriteria; private final QueryBuilder organicQuery; - private final Supplier> pinnedIdsSupplier; - private final Supplier> pinnedDocsSupplier; + private final Supplier> pinnedDocsSupplier; + private final Supplier> excludedDocsSupplier; @Override public TransportVersion getMinimalSupportedVersion() { @@ -89,18 +93,18 @@ public RuleQueryBuilder(StreamInput in) throws IOException { } else { rulesetIds = List.of(in.readString()); in.readOptionalStringCollectionAsList(); - in.readOptionalCollectionAsList(Item::new); + in.readOptionalCollectionAsList(SpecifiedDocument::new); } - pinnedIdsSupplier = null; 
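+        // The suppliers stay null on deserialization: they are only populated during query
+        // rewrite on the coordinating node, and doWriteTo below refuses to serialize a
+        // non-null supplier.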
pinnedDocsSupplier = null; + excludedDocsSupplier = null; } private RuleQueryBuilder( QueryBuilder organicQuery, Map matchCriteria, List rulesetIds, - Supplier> pinnedIdsSupplier, - Supplier> pinnedDocsSupplier + Supplier> pinnedDocsSupplier, + Supplier> excludedDocsSupplier ) { if (organicQuery == null) { @@ -124,18 +128,18 @@ private RuleQueryBuilder( this.organicQuery = organicQuery; this.matchCriteria = matchCriteria; this.rulesetIds = rulesetIds; - this.pinnedIdsSupplier = pinnedIdsSupplier; this.pinnedDocsSupplier = pinnedDocsSupplier; + this.excludedDocsSupplier = excludedDocsSupplier; } @Override protected void doWriteTo(StreamOutput out) throws IOException { - if (pinnedIdsSupplier != null) { - throw new IllegalStateException("pinnedIdsSupplier must be null, can't serialize suppliers, missing a rewriteAndFetch?"); - } if (pinnedDocsSupplier != null) { throw new IllegalStateException("pinnedDocsSupplier must be null, can't serialize suppliers, missing a rewriteAndFetch?"); } + if (excludedDocsSupplier != null) { + throw new IllegalStateException("excludedDocsSupplier must be null, can't serialize suppliers, missing a rewriteAndFetch?"); + } out.writeNamedWriteable(organicQuery); out.writeGenericMap(matchCriteria); @@ -176,18 +180,11 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep @Override protected Query doToQuery(SearchExecutionContext context) throws IOException { // NOTE: this is old query logic, as in 8.12.2+ and 8.13.0+ we will always rewrite this query - // into a pinned query or the organic query. This logic remains here for backwards compatibility + // into a pinned/boolean query or the organic query. This logic remains here for backwards compatibility // with coordinator nodes running versions 8.10.0 - 8.12.1. - List pinnedIds = pinnedIdsSupplier != null ? pinnedIdsSupplier.get() : null; - List pinnedDocs = pinnedDocsSupplier != null ? pinnedDocsSupplier.get() : null; - if ((pinnedIds != null && pinnedIds.isEmpty() == false) && (pinnedDocs != null && pinnedDocs.isEmpty() == false)) { - throw new IllegalArgumentException("applied rules contain both pinned ids and pinned docs, only one of ids or docs is allowed"); - } - if (pinnedIds != null && pinnedIds.isEmpty() == false) { - PinnedQueryBuilder pinnedQueryBuilder = new PinnedQueryBuilder(organicQuery, pinnedIds.toArray(new String[0])); - return pinnedQueryBuilder.toQuery(context); - } else if (pinnedDocs != null && pinnedDocs.isEmpty() == false) { - PinnedQueryBuilder pinnedQueryBuilder = new PinnedQueryBuilder(organicQuery, pinnedDocs.toArray(new Item[0])); + List pinnedDocs = pinnedDocsSupplier != null ? 
pinnedDocsSupplier.get() : null; + if (pinnedDocs != null && pinnedDocs.isEmpty() == false) { + PinnedQueryBuilder pinnedQueryBuilder = new PinnedQueryBuilder(organicQuery, pinnedDocs.toArray(new SpecifiedDocument[0])); return pinnedQueryBuilder.toQuery(context); } else { return organicQuery.toQuery(context); @@ -196,26 +193,43 @@ protected Query doToQuery(SearchExecutionContext context) throws IOException { @Override protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) { - if (pinnedIdsSupplier != null && pinnedDocsSupplier != null) { - List identifiedPinnedIds = pinnedIdsSupplier.get(); - List identifiedPinnedDocs = pinnedDocsSupplier.get(); - if (identifiedPinnedIds == null || identifiedPinnedDocs == null) { - return this; // Not executed yet - } else if (identifiedPinnedIds.isEmpty() && identifiedPinnedDocs.isEmpty()) { - return organicQuery; // Nothing to pin here - } else if (identifiedPinnedIds.isEmpty() == false && identifiedPinnedDocs.isEmpty() == false) { - throw new IllegalArgumentException( - "applied rules contain both pinned ids and pinned docs, only one of ids or docs is allowed" - ); - } else if (identifiedPinnedIds.isEmpty() == false) { - return new PinnedQueryBuilder(organicQuery, truncateList(identifiedPinnedIds).toArray(new String[0])); + + if (pinnedDocsSupplier != null && excludedDocsSupplier != null) { + List identifiedPinnedDocs = pinnedDocsSupplier.get(); + List identifiedExcludedDocs = excludedDocsSupplier.get(); + + if (identifiedPinnedDocs == null || identifiedExcludedDocs == null) { + // Not executed yet + return this; + } + + if (identifiedPinnedDocs.isEmpty() && identifiedExcludedDocs.isEmpty()) { + // Nothing to do, just return the organic query + return organicQuery; + } + + if (identifiedPinnedDocs.isEmpty() == false && identifiedExcludedDocs.isEmpty()) { + // We have pinned IDs but nothing to exclude + return new PinnedQueryBuilder(organicQuery, truncateList(identifiedPinnedDocs).toArray(new SpecifiedDocument[0])); + } + + if (identifiedPinnedDocs.isEmpty()) { + // We have excluded IDs but nothing to pin + QueryBuilder excludedDocsQueryBuilder = buildExcludedDocsQuery(identifiedExcludedDocs); + return new BoolQueryBuilder().must(organicQuery).mustNot(excludedDocsQueryBuilder); } else { - return new PinnedQueryBuilder(organicQuery, truncateList(identifiedPinnedDocs).toArray(new Item[0])); + // We have documents to both pin and exclude + QueryBuilder pinnedQuery = new PinnedQueryBuilder( + organicQuery, + truncateList(identifiedPinnedDocs).toArray(new SpecifiedDocument[0]) + ); + QueryBuilder excludedDocsQueryBuilder = buildExcludedDocsQuery(identifiedExcludedDocs); + return new BoolQueryBuilder().must(pinnedQuery).mustNot(excludedDocsQueryBuilder); } } - SetOnce> pinnedIdsSetOnce = new SetOnce<>(); - SetOnce> pinnedDocsSetOnce = new SetOnce<>(); + SetOnce> pinnedDocsSetOnce = new SetOnce<>(); + SetOnce> excludedDocsSetOnce = new SetOnce<>(); AppliedQueryRules appliedRules = new AppliedQueryRules(); // Identify matching rules and apply them as applicable @@ -255,19 +269,40 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) { } } - pinnedIdsSetOnce.set(appliedRules.pinnedIds().stream().distinct().toList()); pinnedDocsSetOnce.set(appliedRules.pinnedDocs().stream().distinct().toList()); + excludedDocsSetOnce.set(appliedRules.excludedDocs().stream().distinct().toList()); listener.onResponse(null); }, listener::onFailure) ); }); - return new RuleQueryBuilder(organicQuery, matchCriteria, this.rulesetIds, 
pinnedIdsSetOnce::get, pinnedDocsSetOnce::get).boost( + return new RuleQueryBuilder(organicQuery, matchCriteria, this.rulesetIds, pinnedDocsSetOnce::get, excludedDocsSetOnce::get).boost( this.boost ).queryName(this.queryName); } + private QueryBuilder buildExcludedDocsQuery(List identifiedExcludedDocs) { + QueryBuilder excludedDocsQueryBuilder; + if (identifiedExcludedDocs.stream().allMatch(item -> item.index() == null)) { + // Easy case - just add an ids query + excludedDocsQueryBuilder = QueryBuilders.idsQuery() + .addIds(identifiedExcludedDocs.stream().map(SpecifiedDocument::id).toArray(String[]::new)); + } else { + // Here, we have to create Boolean queries for the _id and _index fields + excludedDocsQueryBuilder = QueryBuilders.boolQuery(); + identifiedExcludedDocs.stream().map(item -> { + BoolQueryBuilder excludeQueryBuilder = QueryBuilders.boolQuery() + .must(QueryBuilders.termQuery(IdFieldMapper.NAME, item.id())); + if (item.index() != null) { + excludeQueryBuilder.must(QueryBuilders.termQuery(IndexFieldMapper.NAME, item.index())); + } + return excludeQueryBuilder; + }).forEach(excludeQueryBuilder -> ((BoolQueryBuilder) excludedDocsQueryBuilder).must(excludeQueryBuilder)); + } + return excludedDocsQueryBuilder; + } + private List truncateList(List input) { // PinnedQueryBuilder will return an error if we attempt to return more than the maximum number of // pinned hits. Here, we truncate matching rules rather than return an error. @@ -285,13 +320,13 @@ protected boolean doEquals(RuleQueryBuilder other) { return Objects.equals(rulesetIds, other.rulesetIds) && Objects.equals(matchCriteria, other.matchCriteria) && Objects.equals(organicQuery, other.organicQuery) - && Objects.equals(pinnedIdsSupplier, other.pinnedIdsSupplier) - && Objects.equals(pinnedDocsSupplier, other.pinnedDocsSupplier); + && Objects.equals(pinnedDocsSupplier, other.pinnedDocsSupplier) + && Objects.equals(excludedDocsSupplier, other.excludedDocsSupplier); } @Override protected int doHashCode() { - return Objects.hash(rulesetIds, matchCriteria, organicQuery, pinnedIdsSupplier, pinnedDocsSupplier); + return Objects.hash(rulesetIds, matchCriteria, organicQuery, pinnedDocsSupplier, excludedDocsSupplier); } private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/QueryRuleTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/QueryRuleTests.java index 3f65a9a6f58c5..67e7f6ac7d9e9 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/QueryRuleTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/QueryRuleTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.application.EnterpriseSearchModuleTestUtils; +import org.elasticsearch.xpack.searchbusinessrules.SpecifiedDocument; import org.junit.Before; import java.io.IOException; @@ -99,7 +100,22 @@ public void testToXContentValidPinnedRulesWithIds() throws IOException { "ids": ["id1", "id2"] } }"""); - testToXContentPinnedRules(content); + testToXContentRules(content); + } + + public void testToXContentValidExcludedRulesWithIds() throws IOException { + String content = XContentHelper.stripWhitespace(""" + { + "rule_id": "my_query_rule", + "type": "exclude", + "criteria": [ + { "type": "exact", "metadata": "query_string", "values": 
["foo", "bar"] } + ], + "actions": { + "ids": ["id1", "id2"] + } + }"""); + testToXContentRules(content); } public void testToXContentValidPinnedRulesWithDocs() throws IOException { @@ -123,10 +139,102 @@ public void testToXContentValidPinnedRulesWithDocs() throws IOException { ] } }"""); - testToXContentPinnedRules(content); + testToXContentRules(content); + } + + public void testToXContentValidExcludedRulesWithDocs() throws IOException { + String content = XContentHelper.stripWhitespace(""" + { + "rule_id": "my_query_rule", + "type": "exclude", + "criteria": [ + { "type": "exact", "metadata": "query_string", "values": ["foo", "bar"] } + ], + "actions": { + "docs": [ + { + "_index": "foo", + "_id": "id1" + }, + { + "_index": "bar", + "_id": "id2" + } + ] + } + }"""); + testToXContentRules(content); + } + + public void testToXContentValidPinnedAndExcludedRulesWithIds() throws IOException { + String content = XContentHelper.stripWhitespace(""" + { + "rule_id": "my_pinned_query_rule", + "type": "pinned", + "criteria": [ + { "type": "exact", "metadata": "query_string", "values": ["foo", "bar"] } + ], + "actions": { + "ids": ["id1", "id2"] + } + }, + { + "rule_id": "my_exclude_query_rule", + "type": "exlude", + "criteria": [ + { "type": "exact", "metadata": "query_string", "values": ["baz"] } + ], + "actions": { + "ids": ["id3", "id4"] + } + }"""); + testToXContentRules(content); } - private void testToXContentPinnedRules(String content) throws IOException { + public void testToXContentValidPinnedAndExcludedRulesWithDocs() throws IOException { + String content = XContentHelper.stripWhitespace(""" + { + "rule_id": "my_pinned_query_rule", + "type": "pinned", + "criteria": [ + { "type": "exact", "metadata": "query_string", "values": ["foo", "bar"] } + ], + "actions": { + "docs": [ + { + "_index": "foo", + "_id": "id1" + }, + { + "_index": "bar", + "_id": "id2" + } + ] + } + }, + { + "rule_id": "my_exclude_query_rule", + "type": "exclude", + "criteria": [ + { "type": "exact", "metadata": "query_string", "values": ["foo", "bar"] } + ], + "actions": { + "docs": [ + { + "_index": "foo", + "_id": "id3" + }, + { + "_index": "bar", + "_id": "id4" + } + ] + } + }"""); + testToXContentRules(content); + } + + private void testToXContentRules(String content) throws IOException { QueryRule queryRule = QueryRule.fromXContentBytes(new BytesArray(content), XContentType.JSON); boolean humanReadable = true; BytesReference originalBytes = toShuffledXContent(queryRule, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable); @@ -152,7 +260,22 @@ public void testToXContentPinnedRuleWithInvalidActions() throws IOException { expectThrows(IllegalArgumentException.class, () -> QueryRule.fromXContentBytes(new BytesArray(content), XContentType.JSON)); } - public void testApplyRuleWithOneCriteria() { + public void testToXContentExcludeRuleWithInvalidActions() throws IOException { + String content = XContentHelper.stripWhitespace(""" + { + "rule_id": "my_query_rule", + "type": "exclude", + "criteria": [ + { "type": "exact", "metadata": "query_string", "values": ["foo", "bar"] } + ], + "actions": { + "foo": "bar" + } + }"""); + expectThrows(IllegalArgumentException.class, () -> QueryRule.fromXContentBytes(new BytesArray(content), XContentType.JSON)); + } + + public void testApplyPinnedRuleWithOneCriteria() { QueryRule rule = new QueryRule( randomAlphaOfLength(10), QueryRule.QueryRuleType.PINNED, @@ -162,14 +285,35 @@ public void testApplyRuleWithOneCriteria() { ); AppliedQueryRules appliedQueryRules = new 
AppliedQueryRules(); rule.applyRule(appliedQueryRules, Map.of("query", "elastic")); - assertEquals(List.of("id1", "id2"), appliedQueryRules.pinnedIds()); + assertEquals(List.of(new SpecifiedDocument(null, "id1"), new SpecifiedDocument(null, "id2")), appliedQueryRules.pinnedDocs()); + assertEquals(Collections.emptyList(), appliedQueryRules.excludedDocs()); + + appliedQueryRules = new AppliedQueryRules(); + rule.applyRule(appliedQueryRules, Map.of("query", "elastic1")); + assertEquals(Collections.emptyList(), appliedQueryRules.pinnedDocs()); + assertEquals(Collections.emptyList(), appliedQueryRules.excludedDocs()); + } + + public void testApplyExcludeRuleWithOneCriteria() { + QueryRule rule = new QueryRule( + randomAlphaOfLength(10), + QueryRule.QueryRuleType.EXCLUDE, + List.of(new QueryRuleCriteria(EXACT, "query", List.of("elastic"))), + Map.of("ids", List.of("id1", "id2")), + EnterpriseSearchModuleTestUtils.randomQueryRulePriority() + ); + AppliedQueryRules appliedQueryRules = new AppliedQueryRules(); + rule.applyRule(appliedQueryRules, Map.of("query", "elastic")); + assertEquals(List.of(new SpecifiedDocument(null, "id1"), new SpecifiedDocument(null, "id2")), appliedQueryRules.excludedDocs()); + assertEquals(Collections.emptyList(), appliedQueryRules.pinnedDocs()); appliedQueryRules = new AppliedQueryRules(); rule.applyRule(appliedQueryRules, Map.of("query", "elastic1")); - assertEquals(Collections.emptyList(), appliedQueryRules.pinnedIds()); + assertEquals(Collections.emptyList(), appliedQueryRules.excludedDocs()); + assertEquals(Collections.emptyList(), appliedQueryRules.pinnedDocs()); } - public void testApplyRuleWithMultipleCriteria() { + public void testApplyRuleWithMultipleCriteria() throws IOException { QueryRule rule = new QueryRule( randomAlphaOfLength(10), QueryRule.QueryRuleType.PINNED, @@ -179,11 +323,14 @@ public void testApplyRuleWithMultipleCriteria() { ); AppliedQueryRules appliedQueryRules = new AppliedQueryRules(); rule.applyRule(appliedQueryRules, Map.of("query", "elastic - you know, for search")); - assertEquals(List.of("id1", "id2"), appliedQueryRules.pinnedIds()); + assertEquals(List.of(new SpecifiedDocument(null, "id1"), new SpecifiedDocument(null, "id2")), appliedQueryRules.pinnedDocs()); + assertEquals(Collections.emptyList(), appliedQueryRules.excludedDocs()); + assertEquals(Collections.emptyList(), appliedQueryRules.excludedDocs()); appliedQueryRules = new AppliedQueryRules(); rule.applyRule(appliedQueryRules, Map.of("query", "elastic")); - assertEquals(Collections.emptyList(), appliedQueryRules.pinnedIds()); + assertEquals(Collections.emptyList(), appliedQueryRules.pinnedDocs()); + assertEquals(Collections.emptyList(), appliedQueryRules.excludedDocs()); } private void assertXContent(QueryRule queryRule, boolean humanReadable) throws IOException { diff --git a/x-pack/plugin/search-business-rules/src/internalClusterTest/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilderIT.java b/x-pack/plugin/search-business-rules/src/internalClusterTest/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilderIT.java index 9a28e11ee1cbb..2a17a4a1152cf 100644 --- a/x-pack/plugin/search-business-rules/src/internalClusterTest/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilderIT.java +++ b/x-pack/plugin/search-business-rules/src/internalClusterTest/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilderIT.java @@ -19,7 +19,6 @@ import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; import 
org.elasticsearch.search.fetch.subphase.highlight.HighlightField; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.xpack.searchbusinessrules.PinnedQueryBuilder.Item; import java.util.Collection; import java.util.Collections; @@ -90,11 +89,11 @@ public void testPinnedPromotions() throws Exception { int numPromotions = randomIntBetween(0, totalDocs); LinkedHashSet idPins = new LinkedHashSet<>(); - LinkedHashSet docPins = new LinkedHashSet<>(); + LinkedHashSet docPins = new LinkedHashSet<>(); for (int j = 0; j < numPromotions; j++) { String id = Integer.toString(randomIntBetween(0, totalDocs)); idPins.add(id); - docPins.add(new Item("test", id)); + docPins.add(new SpecifiedDocument("test", id)); } QueryBuilder organicQuery = null; if (i % 5 == 0) { @@ -105,7 +104,12 @@ public void testPinnedPromotions() throws Exception { } assertPinnedPromotions(new PinnedQueryBuilder(organicQuery, idPins.toArray(new String[0])), idPins, i, numRelevantDocs); - assertPinnedPromotions(new PinnedQueryBuilder(organicQuery, docPins.toArray(new Item[0])), idPins, i, numRelevantDocs); + assertPinnedPromotions( + new PinnedQueryBuilder(organicQuery, docPins.toArray(new SpecifiedDocument[0])), + idPins, + i, + numRelevantDocs + ); } } @@ -184,7 +188,7 @@ public void testExhaustiveScoring() throws Exception { QueryBuilder organicQuery = QueryBuilders.queryStringQuery("foo"); assertExhaustiveScoring(new PinnedQueryBuilder(organicQuery, "2")); - assertExhaustiveScoring(new PinnedQueryBuilder(organicQuery, new Item("test", "2"))); + assertExhaustiveScoring(new PinnedQueryBuilder(organicQuery, new SpecifiedDocument("test", "2"))); } private void assertExhaustiveScoring(PinnedQueryBuilder pqb) { @@ -218,7 +222,7 @@ public void testExplain() throws Exception { QueryBuilder organicQuery = QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR); assertExplain(new PinnedQueryBuilder(organicQuery, "2")); - assertExplain(new PinnedQueryBuilder(organicQuery, new Item("test", "2"))); + assertExplain(new PinnedQueryBuilder(organicQuery, new SpecifiedDocument("test", "2"))); } private void assertExplain(PinnedQueryBuilder pqb) { @@ -259,7 +263,7 @@ public void testHighlight() throws Exception { QueryBuilder organicQuery = QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR); assertHighlight(new PinnedQueryBuilder(organicQuery, "2")); - assertHighlight(new PinnedQueryBuilder(organicQuery, new Item("test", "2"))); + assertHighlight(new PinnedQueryBuilder(organicQuery, new SpecifiedDocument("test", "2"))); } private void assertHighlight(PinnedQueryBuilder pqb) { @@ -320,9 +324,9 @@ public void testMultiIndexDocs() throws Exception { PinnedQueryBuilder pqb = new PinnedQueryBuilder( QueryBuilders.queryStringQuery("foo"), - new Item("test2", "a"), - new Item("test1", "a"), - new Item("test1", "b") + new SpecifiedDocument("test2", "a"), + new SpecifiedDocument("test1", "a"), + new SpecifiedDocument("test1", "b") ); assertResponse(prepareSearch().setQuery(pqb).setTrackTotalHits(true).setSearchType(DFS_QUERY_THEN_FETCH), searchResponse -> { @@ -360,9 +364,9 @@ public void testMultiIndexWithAliases() throws Exception { PinnedQueryBuilder pqb = new PinnedQueryBuilder( QueryBuilders.queryStringQuery("document"), - new Item("test", "b"), - new Item("test-alias", "a"), - new Item("test", "a") + new SpecifiedDocument("test", "b"), + new SpecifiedDocument("test-alias", "a"), + new SpecifiedDocument("test", "a") ); 
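+        // The pinned documents are promoted ahead of any organic matches, in the order
+        // they are listed above.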
assertResponse(prepareSearch().setQuery(pqb).setTrackTotalHits(true).setSearchType(DFS_QUERY_THEN_FETCH), searchResponse -> { @@ -416,11 +420,11 @@ public void testMultiIndexWithAliasesAndDuplicateIds() throws Exception { PinnedQueryBuilder pqb = new PinnedQueryBuilder( QueryBuilders.queryStringQuery("document"), - new Item("test1", "b"), - new Item(null, "a"), - new Item("test1", "c"), - new Item("test1", "a"), - new Item("test-alias", "a") + new SpecifiedDocument("test1", "b"), + new SpecifiedDocument(null, "a"), + new SpecifiedDocument("test1", "c"), + new SpecifiedDocument("test1", "a"), + new SpecifiedDocument("test-alias", "a") ); assertResponse(prepareSearch().setQuery(pqb).setTrackTotalHits(true).setSearchType(DFS_QUERY_THEN_FETCH), searchResponse -> { diff --git a/x-pack/plugin/search-business-rules/src/main/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilder.java b/x-pack/plugin/search-business-rules/src/main/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilder.java index 45f1fc4939bc1..0ef7eebffadaf 100644 --- a/x-pack/plugin/search-business-rules/src/main/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilder.java +++ b/x-pack/plugin/search-business-rules/src/main/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilder.java @@ -16,11 +16,8 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.regex.Regex; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.AbstractQueryBuilder; @@ -29,7 +26,6 @@ import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; @@ -57,132 +53,26 @@ public class PinnedQueryBuilder extends AbstractQueryBuilder public static final ParseField DOCS_FIELD = new ParseField("docs"); public static final ParseField ORGANIC_QUERY_FIELD = new ParseField("organic"); - private static final TransportVersion OPTIONAL_INDEX_IN_DOCS_VERSION = TransportVersions.V_8_11_X; - private final List ids; - private final List docs; + private final List docs; private QueryBuilder organicQuery; // Organic queries will have their scores capped to this number range, // We reserve the highest float exponent for scores of pinned queries private static final float MAX_ORGANIC_SCORE = Float.intBitsToFloat((0xfe << 23)) - 1; - /** - * A single item to be used for a {@link PinnedQueryBuilder}. 
- */ - public static final class Item implements ToXContentObject, Writeable { - public static final String NAME = "item"; - - public static final ParseField INDEX_FIELD = new ParseField("_index"); - public static final ParseField ID_FIELD = new ParseField("_id"); - - private final String index; - private final String id; - - /** - * Constructor for a given item request - * - * @param index the index where the document is located - * @param id and its id - */ - public Item(String index, String id) { - if (index != null && Regex.isSimpleMatchPattern(index)) { - throw new IllegalArgumentException("Item index cannot contain wildcard expressions"); - } - if (id == null) { - throw new IllegalArgumentException("Item requires id to be non-null"); - } - this.index = index; - this.id = id; - } - - private Item(String id) { - this.index = null; - this.id = id; - } - - /** - * Read from a stream. - */ - public Item(StreamInput in) throws IOException { - if (in.getTransportVersion().onOrAfter(OPTIONAL_INDEX_IN_DOCS_VERSION)) { - index = in.readOptionalString(); - } else { - index = in.readString(); - } - id = in.readString(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(OPTIONAL_INDEX_IN_DOCS_VERSION)) { - out.writeOptionalString(index); - } else { - if (index == null) { - throw new IllegalArgumentException( - "[_index] needs to be specified for docs elements when cluster nodes are not in the same version" - ); - } - out.writeString(index); - } - out.writeString(id); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - if (this.index != null) { - builder.field(INDEX_FIELD.getPreferredName(), this.index); - } - builder.field(ID_FIELD.getPreferredName(), this.id); - return builder.endObject(); - } - - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - NAME, - a -> new Item((String) a[0], (String) a[1]) - ); - - static { - PARSER.declareString(optionalConstructorArg(), INDEX_FIELD); - PARSER.declareString(constructorArg(), ID_FIELD); - } - - @Override - public String toString() { - return Strings.toString(this, true, true); - } - - @Override - public int hashCode() { - return Objects.hash(index, id); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if ((o instanceof Item) == false) { - return false; - } - Item other = (Item) o; - return Objects.equals(index, other.index) && Objects.equals(id, other.id); - } - } - public PinnedQueryBuilder(QueryBuilder organicQuery, String... ids) { this(organicQuery, Arrays.asList(ids), null); } - public PinnedQueryBuilder(QueryBuilder organicQuery, Item... docs) { + public PinnedQueryBuilder(QueryBuilder organicQuery, SpecifiedDocument... 
docs) { this(organicQuery, null, Arrays.asList(docs)); } /** * Creates a new PinnedQueryBuilder */ - private PinnedQueryBuilder(QueryBuilder organicQuery, List ids, List docs) { + private PinnedQueryBuilder(QueryBuilder organicQuery, List ids, List docs) { if (organicQuery == null) { throw new IllegalArgumentException("[" + NAME + "] organicQuery cannot be null"); } @@ -215,8 +105,8 @@ private PinnedQueryBuilder(QueryBuilder organicQuery, List ids, List deduped = new LinkedHashSet<>(); - for (Item doc : docs) { + LinkedHashSet deduped = new LinkedHashSet<>(); + for (SpecifiedDocument doc : docs) { if (doc == null) { throw new IllegalArgumentException("[" + NAME + "] doc cannot be null"); } @@ -239,7 +129,7 @@ public PinnedQueryBuilder(StreamInput in) throws IOException { docs = null; } else { ids = in.readOptionalStringCollectionAsList(); - docs = in.readBoolean() ? in.readCollectionAsList(Item::new) : null; + docs = in.readBoolean() ? in.readCollectionAsList(SpecifiedDocument::new) : null; } organicQuery = in.readNamedWriteable(QueryBuilder.class); } @@ -280,7 +170,7 @@ public List ids() { /** * @return the pinned docs for the query. */ - public List docs() { + public List docs() { if (this.docs == null) { return Collections.emptyList(); } @@ -303,8 +193,8 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep } if (docs != null) { builder.startArray(DOCS_FIELD.getPreferredName()); - for (Item item : docs) { - builder.value(item); + for (SpecifiedDocument specifiedDocument : docs) { + builder.value(specifiedDocument); } builder.endArray(); } @@ -317,13 +207,13 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep @SuppressWarnings("unchecked") List ids = (List) a[1]; @SuppressWarnings("unchecked") - List docs = (List) a[2]; + List docs = (List) a[2]; return new PinnedQueryBuilder(organicQuery, ids, docs); }); static { PARSER.declareObject(constructorArg(), (p, c) -> parseInnerQueryBuilder(p), ORGANIC_QUERY_FIELD); PARSER.declareStringArray(optionalConstructorArg(), IDS_FIELD); - PARSER.declareObjectArray(optionalConstructorArg(), Item.PARSER, DOCS_FIELD); + PARSER.declareObjectArray(optionalConstructorArg(), SpecifiedDocument.PARSER, DOCS_FIELD); declareStandardFields(PARSER); } @@ -357,24 +247,26 @@ protected Query doToQuery(SearchExecutionContext context) throws IOException { if (idField == null) { return new MatchNoDocsQuery("No mappings"); } - List items = (docs != null) ? docs : ids.stream().map(id -> new Item(id)).toList(); - if (items.isEmpty()) { + List specifiedDocuments = (docs != null) + ? 
docs + : ids.stream().map(id -> new SpecifiedDocument(null, id)).toList(); + if (specifiedDocuments.isEmpty()) { return new CappedScoreQuery(organicQuery.toQuery(context), MAX_ORGANIC_SCORE); } else { List pinnedQueries = new ArrayList<>(); // Ensure each pin order using a Boost query with the relevant boost factor int minPin = NumericUtils.floatToSortableInt(MAX_ORGANIC_SCORE) + 1; - int boostNum = minPin + items.size(); + int boostNum = minPin + specifiedDocuments.size(); float lastScore = Float.MAX_VALUE; - for (Item item : items) { + for (SpecifiedDocument specifiedDocument : specifiedDocuments) { float pinScore = NumericUtils.sortableIntToFloat(boostNum); assert pinScore < lastScore; lastScore = pinScore; boostNum--; - if (item.index == null || context.indexMatches(item.index)) { + if (specifiedDocument.index() == null || context.indexMatches(specifiedDocument.index())) { // Ensure the pin order using a Boost query with the relevant boost factor - Query idQuery = new BoostQuery(new ConstantScoreQuery(idField.termQuery(item.id, context)), pinScore); + Query idQuery = new BoostQuery(new ConstantScoreQuery(idField.termQuery(specifiedDocument.id(), context)), pinScore); pinnedQueries.add(idQuery); } } diff --git a/x-pack/plugin/search-business-rules/src/main/java/org/elasticsearch/xpack/searchbusinessrules/SpecifiedDocument.java b/x-pack/plugin/search-business-rules/src/main/java/org/elasticsearch/xpack/searchbusinessrules/SpecifiedDocument.java new file mode 100644 index 0000000000000..8c3801630fb2b --- /dev/null +++ b/x-pack/plugin/search-business-rules/src/main/java/org/elasticsearch/xpack/searchbusinessrules/SpecifiedDocument.java @@ -0,0 +1,136 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.searchbusinessrules; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * A single specified document to be used for a {@link PinnedQueryBuilder} or query rules. 
+ */ +public final class SpecifiedDocument implements ToXContentObject, Writeable { + + public static final TransportVersion OPTIONAL_INDEX_IN_DOCS_VERSION = TransportVersions.V_8_11_X; + + public static final String NAME = "specified_document"; + + public static final ParseField INDEX_FIELD = new ParseField("_index"); + public static final ParseField ID_FIELD = new ParseField("_id"); + + private final String index; + private final String id; + + /** + * Constructor for a given specified document request + * + * @param index the index where the document is located + * @param id and its id + */ + public SpecifiedDocument(String index, String id) { + if (index != null && Regex.isSimpleMatchPattern(index)) { + throw new IllegalArgumentException("Specified document index cannot contain wildcard expressions"); + } + if (id == null) { + throw new IllegalArgumentException("Specified document requires id to be non-null"); + } + this.index = index; + this.id = id; + } + + /** + * Read from a stream. + */ + public SpecifiedDocument(StreamInput in) throws IOException { + if (in.getTransportVersion().onOrAfter(OPTIONAL_INDEX_IN_DOCS_VERSION)) { + index = in.readOptionalString(); + } else { + index = in.readString(); + } + id = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + if (out.getTransportVersion().onOrAfter(OPTIONAL_INDEX_IN_DOCS_VERSION)) { + out.writeOptionalString(index); + } else { + if (index == null) { + throw new IllegalArgumentException( + "[_index] needs to be specified for docs elements when cluster nodes are not in the same version" + ); + } + out.writeString(index); + } + out.writeString(id); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (this.index != null) { + builder.field(INDEX_FIELD.getPreferredName(), this.index); + } + builder.field(ID_FIELD.getPreferredName(), this.id); + return builder.endObject(); + } + + static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + NAME, + a -> new SpecifiedDocument((String) a[0], (String) a[1]) + ); + + static { + PARSER.declareString(optionalConstructorArg(), INDEX_FIELD); + PARSER.declareString(constructorArg(), ID_FIELD); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } + + @Override + public int hashCode() { + return Objects.hash(index, id); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if ((o instanceof SpecifiedDocument) == false) { + return false; + } + SpecifiedDocument other = (SpecifiedDocument) o; + return Objects.equals(index, other.index) && Objects.equals(id, other.id); + } + + public String id() { + return id; + } + + public String index() { + return index; + } +} diff --git a/x-pack/plugin/search-business-rules/src/test/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilderTests.java b/x-pack/plugin/search-business-rules/src/test/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilderTests.java index 3f41d186d9f8a..56d652b05f100 100644 --- a/x-pack/plugin/search-business-rules/src/test/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilderTests.java +++ b/x-pack/plugin/search-business-rules/src/test/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilderTests.java @@ -21,7 +21,6 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; import 
org.elasticsearch.xcontent.json.JsonStringEncoder; -import org.elasticsearch.xpack.searchbusinessrules.PinnedQueryBuilder.Item; import java.io.IOException; import java.util.ArrayList; @@ -91,8 +90,13 @@ private static QueryBuilder createTestTermQueryBuilder() { return new TermQueryBuilder(fieldName, value); } - private static Item[] generateRandomItems() { - return randomArray(1, 100, Item[]::new, () -> new Item(randomBoolean() ? null : randomAlphaOfLength(64), randomAlphaOfLength(256))); + private static SpecifiedDocument[] generateRandomItems() { + return randomArray( + 1, + 100, + SpecifiedDocument[]::new, + () -> new SpecifiedDocument(randomBoolean() ? null : randomAlphaOfLength(64), randomAlphaOfLength(256)) + ); } @Override @@ -121,24 +125,29 @@ public void testIllegalArguments() { expectThrows(IllegalArgumentException.class, () -> new PinnedQueryBuilder(new MatchAllQueryBuilder(), (String) null)); expectThrows(IllegalArgumentException.class, () -> new PinnedQueryBuilder(null, "1")); expectThrows(IllegalArgumentException.class, () -> new PinnedQueryBuilder(new MatchAllQueryBuilder(), "1", null, "2")); + expectThrows(IllegalArgumentException.class, () -> new PinnedQueryBuilder(new MatchAllQueryBuilder(), (SpecifiedDocument) null)); + expectThrows(IllegalArgumentException.class, () -> new PinnedQueryBuilder(null, new SpecifiedDocument("test", "1"))); expectThrows( IllegalArgumentException.class, - () -> new PinnedQueryBuilder(new MatchAllQueryBuilder(), (PinnedQueryBuilder.Item) null) + () -> new PinnedQueryBuilder( + new MatchAllQueryBuilder(), + new SpecifiedDocument("test", "1"), + null, + new SpecifiedDocument("test", "2") + ) ); - expectThrows(IllegalArgumentException.class, () -> new PinnedQueryBuilder(null, new Item("test", "1"))); expectThrows( IllegalArgumentException.class, - () -> new PinnedQueryBuilder(new MatchAllQueryBuilder(), new Item("test", "1"), null, new Item("test", "2")) + () -> new PinnedQueryBuilder(new MatchAllQueryBuilder(), new SpecifiedDocument("test*", "1")) ); - expectThrows(IllegalArgumentException.class, () -> new PinnedQueryBuilder(new MatchAllQueryBuilder(), new Item("test*", "1"))); String[] bigIdList = new String[PinnedQueryBuilder.MAX_NUM_PINNED_HITS + 1]; - Item[] bigItemList = new Item[PinnedQueryBuilder.MAX_NUM_PINNED_HITS + 1]; + SpecifiedDocument[] bigSpecifiedDocumentList = new SpecifiedDocument[PinnedQueryBuilder.MAX_NUM_PINNED_HITS + 1]; for (int i = 0; i < bigIdList.length; i++) { bigIdList[i] = String.valueOf(i); - bigItemList[i] = new Item("test", String.valueOf(i)); + bigSpecifiedDocumentList[i] = new SpecifiedDocument("test", String.valueOf(i)); } expectThrows(IllegalArgumentException.class, () -> new PinnedQueryBuilder(new MatchAllQueryBuilder(), bigIdList)); - expectThrows(IllegalArgumentException.class, () -> new PinnedQueryBuilder(new MatchAllQueryBuilder(), bigItemList)); + expectThrows(IllegalArgumentException.class, () -> new PinnedQueryBuilder(new MatchAllQueryBuilder(), bigSpecifiedDocumentList)); } @@ -214,7 +223,7 @@ public void testIdsRewrite() throws IOException { } public void testDocsRewrite() throws IOException { - PinnedQueryBuilder pinnedQueryBuilder = new PinnedQueryBuilder(new TermQueryBuilder("foo", 1), new Item("test", "1")); + PinnedQueryBuilder pinnedQueryBuilder = new PinnedQueryBuilder(new TermQueryBuilder("foo", 1), new SpecifiedDocument("test", "1")); QueryBuilder rewritten = pinnedQueryBuilder.rewrite(createSearchExecutionContext()); assertThat(rewritten, instanceOf(PinnedQueryBuilder.class)); } @@ -239,12 
+248,16 @@ public void testIdInsertionOrderRetained() { } public void testDocInsertionOrderRetained() { - Item[] items = randomArray(10, Item[]::new, () -> new Item(randomAlphaOfLength(64), randomAlphaOfLength(256))); - PinnedQueryBuilder pqb = new PinnedQueryBuilder(new MatchAllQueryBuilder(), items); - List addedDocs = pqb.docs(); + SpecifiedDocument[] specifiedDocuments = randomArray( + 10, + SpecifiedDocument[]::new, + () -> new SpecifiedDocument(randomAlphaOfLength(64), randomAlphaOfLength(256)) + ); + PinnedQueryBuilder pqb = new PinnedQueryBuilder(new MatchAllQueryBuilder(), specifiedDocuments); + List addedDocs = pqb.docs(); int pos = 0; - for (Item item : addedDocs) { - assertEquals(items[pos++], item); + for (SpecifiedDocument specifiedDocument : addedDocs) { + assertEquals(specifiedDocuments[pos++], specifiedDocument); } } } From d0253a6c7b76b74965ad88ad835bc75eb2b2041e Mon Sep 17 00:00:00 2001 From: Ioana Tagirta Date: Fri, 2 Aug 2024 14:56:32 +0200 Subject: [PATCH 21/36] ES|QL match operator - add check for snapshot build in test (#111536) * Check for snapshot builds for match operator tests * Unmute test --- muted-tests.yml | 3 --- .../elasticsearch/xpack/esql/plugin/MatchOperatorIT.java | 9 +++++++++ .../elasticsearch/xpack/esql/analysis/VerifierTests.java | 4 ++++ .../esql/optimizer/LocalPhysicalPlanOptimizerTests.java | 5 +++++ .../rest-api-spec/test/esql/180_match_operator.yml | 1 + 5 files changed, 19 insertions(+), 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index aaec3345782e8..98a9a52f85a08 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -105,9 +105,6 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/111282 - class: org.elasticsearch.xpack.ml.integration.DatafeedJobsRestIT issue: https://github.com/elastic/elasticsearch/issues/111319 -- class: org.elasticsearch.xpack.esql.analysis.VerifierTests - method: testMatchFilter - issue: https://github.com/elastic/elasticsearch/issues/111380 - class: org.elasticsearch.xpack.ml.integration.InferenceIngestInputConfigIT method: testIngestWithInputFields issue: https://github.com/elastic/elasticsearch/issues/111383 diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/MatchOperatorIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/MatchOperatorIT.java index 6eef27fcd04b8..ab21a2bb8b995 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/MatchOperatorIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/MatchOperatorIT.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.esql.plugin; +import org.elasticsearch.Build; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest; @@ -14,6 +15,8 @@ import org.elasticsearch.xpack.esql.VerificationException; import org.elasticsearch.xpack.esql.action.AbstractEsqlIntegTestCase; import org.elasticsearch.xpack.esql.action.ColumnInfoImpl; +import org.elasticsearch.xpack.esql.action.EsqlQueryRequest; +import org.elasticsearch.xpack.esql.action.EsqlQueryResponse; import org.elasticsearch.xpack.esql.core.type.DataType; import org.junit.Before; @@ -34,6 +37,12 @@ public void setupIndex() { createAndPopulateIndex(); } + @Override + protected EsqlQueryResponse run(EsqlQueryRequest request) { + assumeTrue("match operator available in snapshot builds only", Build.current().isSnapshot()); + return 
super.run(request); + } + public void testSimpleWhereMatch() { var query = """ FROM test diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java index 49372da04d8c3..920f0a670d09d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java @@ -628,10 +628,14 @@ public void testWeightedAvg() { } public void testMatchInsideEval() throws Exception { + assumeTrue("Match operator is available just for snapshots", Build.current().isSnapshot()); + assertEquals("1:36: EVAL does not support MATCH expressions", error("row title = \"brown fox\" | eval x = title match \"fox\" ")); } public void testMatchFilter() throws Exception { + assumeTrue("Match operator is available just for snapshots", Build.current().isSnapshot()); + assertEquals( "1:63: MATCH requires a mapped index field, found [name]", error("from test | eval name = concat(first_name, last_name) | where name match \"Anna\"") diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java index 7374ee55b048c..89b3cad1e831b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java @@ -10,6 +10,7 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.apache.lucene.search.IndexSearcher; +import org.elasticsearch.Build; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Tuple; @@ -761,6 +762,8 @@ public void testMissingFieldsDoNotGetExtracted() { * estimatedRowSize[324] */ public void testSingleMatchFilterPushdown() { + assumeTrue("Match operator is available just for snapshots", Build.current().isSnapshot()); + var plan = plannerOptimizer.plan(""" from test | where first_name match "Anna" @@ -791,6 +794,8 @@ public void testSingleMatchFilterPushdown() { * [_doc{f}#22], limit[1000], sort[[FieldSort[field=emp_no{f}#12, direction=ASC, nulls=LAST]]] estimatedRowSize[336] */ public void testMultipleMatchFilterPushdown() { + assumeTrue("Match operator is available just for snapshots", Build.current().isSnapshot()); + var plan = plannerOptimizer.plan(""" from test | where first_name match "Anna" OR first_name match "Anneke" diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/180_match_operator.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/180_match_operator.yml index 061fbbd57c2c8..959581b18c11a 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/180_match_operator.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/180_match_operator.yml @@ -6,6 +6,7 @@ setup: path: /_query parameters: [ method, path, parameters, capabilities ] capabilities: [ match_operator ] + cluster_features: [ "gte_v8.16.0" ] reason: "Match operator added in 8.16.0" test_runner_features: [capabilities, allowed_warnings_regex] - do: From a0480ce17c3078a8d6c071519b550ef09291881c Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 2 Aug 2024 07:56:59 
-0700 Subject: [PATCH 22/36] Expose shard field stats (#111525) Previously, we returned the number of segments and the total number of fields in those segments in NodeMappingStats (see #111123). However, the total number of fields returned in that PR might be very inaccurate for indices having large mappings but only a small number of actual fields. This change returns a more accurate total number of fields using the Lucene FieldInfos from those segments. Since we need to acquire a searcher to compute these stats, we opt to compute them after a shard is refreshed and cache the result. Relates #111123 --- .../elasticsearch/index/shard/IndexShard.java | 37 +++++++++++++- .../index/shard/ShardFieldStats.java | 20 ++++++++ .../index/shard/IndexShardTests.java | 51 +++++++++++++++++++ 3 files changed, 107 insertions(+), 1 deletion(-) create mode 100644 server/src/main/java/org/elasticsearch/index/shard/ShardFieldStats.java diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 73cbca36a69c8..c6b6b9d98cdae 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -17,6 +17,7 @@ import org.apache.lucene.index.FilterDirectoryReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.ReferenceManager; @@ -224,6 +225,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl private final IndexStorePlugin.SnapshotCommitSupplier snapshotCommitSupplier; private final Engine.IndexCommitListener indexCommitListener; private FieldInfos fieldInfos; + private volatile ShardFieldStats shardFieldStats; + // sys prop to disable the field has value feature, defaults to true (enabled) if set to false (disabled) the // field caps always returns empty fields ignoring the value of the query param `field_caps_empty_fields_filter`. private final boolean enableFieldHasValue = Booleans.parseBoolean( @@ -3489,7 +3492,7 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) { cachingPolicy, translogConfig, IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.get(indexSettings.getSettings()), - List.of(refreshListeners, refreshPendingLocationListener, refreshFieldHasValueListener), + List.of(refreshListeners, refreshPendingLocationListener, refreshFieldHasValueListener, new RefreshShardFieldStatsListener()), Collections.singletonList(new RefreshMetricUpdater(refreshMetric)), indexSort, circuitBreakerService, @@ -4060,6 +4063,38 @@ public void afterRefresh(boolean didRefresh) { } } + /** + * Returns the shard-level field stats, which includes the number of segments in the latest NRT reader of this shard + * and the total number of fields across those segments.
+ */ + public ShardFieldStats getShardFieldStats() { + return shardFieldStats; + } + + private class RefreshShardFieldStatsListener implements ReferenceManager.RefreshListener { + @Override + public void beforeRefresh() { + + } + + @Override + public void afterRefresh(boolean didRefresh) { + if (shardFieldStats == null || didRefresh) { + try (var searcher = getEngine().acquireSearcher("shard_field_stats", Engine.SearcherScope.INTERNAL)) { + int numSegments = 0; + int totalFields = 0; + for (LeafReaderContext leaf : searcher.getLeafContexts()) { + numSegments++; + totalFields += leaf.reader().getFieldInfos().size(); + } + shardFieldStats = new ShardFieldStats(numSegments, totalFields); + } catch (AlreadyClosedException ignored) { + + } + } + } + } + /** * Ensures this shard is search active before invoking the provided listener. *
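For reference, the per-segment counting that the RefreshShardFieldStatsListener above performs can be reproduced with plain Lucene APIs. The following is a minimal, self-contained sketch, not part of this patch: the class name and the Directory wiring are illustrative assumptions, and the real listener sums over a searcher acquired from the shard's engine rather than opening its own reader.

    import java.io.IOException;

    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.LeafReaderContext;
    import org.apache.lucene.store.Directory;

    // Hypothetical helper, mirroring the listener: one pass over the reader's
    // segments, summing per-segment FieldInfos sizes. FieldInfos reflects the
    // fields actually present in each segment, which is why this count is more
    // accurate than counting the fields declared in the index mapping.
    final class FieldStatsSketch {
        record Stats(int numSegments, int totalFields) {}

        static Stats compute(Directory directory) throws IOException {
            try (DirectoryReader reader = DirectoryReader.open(directory)) {
                int numSegments = 0;
                int totalFields = 0;
                for (LeafReaderContext leaf : reader.leaves()) {
                    numSegments++;
                    totalFields += leaf.reader().getFieldInfos().size();
                }
                return new Stats(numSegments, totalFields);
            }
        }
    }

Note that, as the test further down demonstrates, the total includes metadata fields such as _id, _source, _version, _primary_term and _seq_no alongside the user-defined fields.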
diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardFieldStats.java b/server/src/main/java/org/elasticsearch/index/shard/ShardFieldStats.java new file mode 100644 index 0000000000000..9c53abb1e95e5 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/shard/ShardFieldStats.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index.shard; + +/** + * A per shard stats including the number of segments and total fields across those segments. + * These stats should be recomputed whenever the shard is refreshed. + * + * @param numSegments the number of segments + * @param totalFields the total number of fields across the segments + */ +public record ShardFieldStats(int numSegments, int totalFields) { + +} diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 142c03cdfa053..0f29ad0be6b8a 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -1784,6 +1784,57 @@ public void testExternalRefreshMetric() throws IOException { closeShards(shard); } + public void testShardFieldStats() throws IOException { + Settings settings = Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.MINUS_ONE).build(); + IndexShard shard = newShard(true, settings); + assertNull(shard.getShardFieldStats()); + recoverShardFromStore(shard); + ShardFieldStats stats = shard.getShardFieldStats(); + assertNotNull(stats); + assertThat(stats.numSegments(), equalTo(0)); + assertThat(stats.totalFields(), equalTo(0)); + // index some documents + int numDocs = between(1, 10); + for (int i = 0; i < numDocs; i++) { + indexDoc(shard, "_doc", "first_" + i, """ + { + "f1": "foo", + "f2": "bar" + } + """); + } + assertThat(shard.getShardFieldStats(), sameInstance(stats)); + shard.refresh("test"); + stats = shard.getShardFieldStats(); + assertThat(stats.numSegments(), equalTo(1)); + // _id, _source, _version, _primary_term, _seq_no, f1, f1.keyword, f2, f2.keyword, + assertThat(stats.totalFields(), equalTo(9)); + // don't re-compute on refresh without change + shard.refresh("test"); + assertThat(shard.getShardFieldStats(), sameInstance(stats)); + // index more docs + numDocs = between(1, 10); + for (int i = 0; i < numDocs; i++) { + indexDoc(shard, "_doc", "first_" + i, """ + { + "f1": "foo", + "f2": "bar", + "f3": "foobar" + } + """); + } + shard.refresh("test"); + stats = shard.getShardFieldStats(); + assertThat(stats.numSegments(), equalTo(2)); + // 9 + _id, _source, _version, _primary_term, _seq_no, f1, f1.keyword, f2, f2.keyword, f3, f3.keyword + assertThat(stats.totalFields(), equalTo(21)); + shard.forceMerge(new ForceMergeRequest().maxNumSegments(1).flush(true)); + stats = shard.getShardFieldStats(); + assertThat(stats.numSegments(), equalTo(1)); + assertThat(stats.totalFields(), equalTo(12)); + closeShards(shard); + } + public void testIndexingOperationsListeners() throws IOException { IndexShard shard = newStartedShard(true); indexDoc(shard, "_doc", "0", "{\"foo\" : \"bar\"}"); From 
ec4444b166e4c5cd0ae8e2e3ddf1d0805d92c83e Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Fri, 2 Aug 2024 12:48:51 -0500 Subject: [PATCH 23/36] Fixing potential OOME in HistoryIntegrationTest.testThatHistoryIsTruncated() (#111556) --- .../integration/HistoryIntegrationTests.java | 20 ++++++++++++++----- .../elasticsearch/xpack/watcher/Watcher.java | 1 + 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/HistoryIntegrationTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/HistoryIntegrationTests.java index ee645e4f32798..0070554d99d27 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/HistoryIntegrationTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/HistoryIntegrationTests.java @@ -8,6 +8,7 @@ import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse; import org.elasticsearch.search.SearchHit; @@ -53,6 +54,15 @@ public class HistoryIntegrationTests extends AbstractWatcherIntegrationTestCase { + @Override + protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { + + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal, otherSettings)) + .put("xpack.watcher.max.history.record.size", "100kb") // used for testThatHistoryIsTruncated() + .build(); + } + // issue: https://github.com/elastic/x-plugins/issues/2338 public void testThatHistoryIsWrittenWithChainedInput() throws Exception { XContentBuilder xContentBuilder = jsonBuilder().startObject() @@ -232,13 +242,13 @@ public void testThatHistoryContainsStatus() throws Exception { public void testThatHistoryIsTruncated() throws Exception { { /* - * The input for this watch is 1 MB, smaller than the 10 MB default of HistoryStore's MAX_HISTORY_SIZE_SETTING. So we do not - * expect its history record to be truncated. + * The input for this watch is 20 KB, smaller than the configured 100 KB of HistoryStore's MAX_HISTORY_SIZE_SETTING. So we do + * not expect its history record to be truncated. */ new PutWatchRequestBuilder(client()).setId("test_watch_small") .setSource( watchBuilder().trigger(schedule(interval(5, IntervalSchedule.Interval.Unit.HOURS))) - .input(simpleInput("foo", randomAlphaOfLength((int) ByteSizeValue.ofMb(1).getBytes()))) + .input(simpleInput("foo", randomAlphaOfLength((int) ByteSizeValue.ofKb(20).getBytes()))) .addAction("_logger", loggingAction("#### randomLogging")) ) .get(); @@ -261,13 +271,13 @@ public void testThatHistoryIsTruncated() throws Exception { } { /* - * The input for this watch is 20 MB, much bigger than the 10 MB default of HistoryStore's MAX_HISTORY_SIZE_SETTING. So we + * The input for this watch is 500 KB, much bigger than the configured 100 KB of HistoryStore's MAX_HISTORY_SIZE_SETTING. So we * expect to see its history record truncated before being stored. 
*/ new PutWatchRequestBuilder(client()).setId("test_watch_large") .setSource( watchBuilder().trigger(schedule(interval(5, IntervalSchedule.Interval.Unit.HOURS))) - .input(simpleInput("foo", randomAlphaOfLength((int) ByteSizeValue.ofMb(20).getBytes()))) + .input(simpleInput("foo", randomAlphaOfLength((int) ByteSizeValue.ofKb(500).getBytes()))) .addAction("_logger", loggingAction("#### randomLogging")) ) .get(); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java index 821c92b514667..5cff1db9dd174 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java @@ -613,6 +613,7 @@ public List<Setting<?>> getSettings() { settings.add(SETTING_BULK_CONCURRENT_REQUESTS); settings.add(SETTING_BULK_FLUSH_INTERVAL); settings.add(SETTING_BULK_SIZE); + settings.add(HistoryStore.MAX_HISTORY_SIZE_SETTING); // notification services settings.addAll(SlackService.getSettings()); From bf88da3ccef9966e3c96af01af6e96a57564960b Mon Sep 17 00:00:00 2001 From: Parker Timmins Date: Fri, 2 Aug 2024 13:46:55 -0500 Subject: [PATCH 24/36] Register SLM run before snapshotting to save stats (#110216) The SLM health indicator relies on the policyMetadata.getInvocationsSinceLastSuccess to determine if the last several snapshots have failed. If a snapshot fails and the master is shut down before setting invocationsSinceLastSuccess, the fact that failure occurred will be lost. To solve this, before snapshotting, we register that a snapshot is about to run, in the cluster state custom metadata. If the run fails, and invocationsSinceLastSuccess is not updated before a master shutdown, the fact that the failure occurred will not be lost. On completion of a subsequent snapshot run, SnapshotLifecycleTask will observe that there exists a registered snapshot which is no longer running. It will infer that the snapshot failed, and update invocationsSinceLastSuccess and other stats accordingly. A few parts of this change touch general snapshot code, and are worth noting: * Snapshots can only be uniquely identified with a uuid which had previously been generated in SnapshotService. This uuid is needed when there is a snapshot failure, but was not available to SLM as it was only returned in the SnapshotInfo after a success. To make this available, the uuid is now generated in the CreateSnapshotRequest constructor and passed to SnapshotService. In mixed-version clusters, there exists a special case where the uuid is still generated in SnapshotService. * If a snapshot were registered before calling the snapshot service, there would be a small period of time when a snapshot is registered but not yet stored in SnapshotsInProgress. During this time, another snapshot from the same policy might incorrectly infer that the snapshot failed and update its stats accordingly. To avoid this race condition, we register snapshots within SnapshotService in the same cluster update which updates SnapshotsInProgress.
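To make the inference step concrete, the reconciliation a subsequent run performs can be pictured with the following hypothetical sketch; the type and method names below are illustrative assumptions and do not appear in this change.

    import java.util.List;
    import java.util.Set;

    final class InferFailuresSketch {
        // Illustrative stand-in for a registered (policy, snapshot uuid) pair.
        record PolicySnapshot(String policy, String snapshotUuid) {}

        // Anything registered by an earlier run that is no longer in the set of
        // currently running snapshots, and that never recorded its own outcome,
        // must have failed silently (for example, because the master shut down
        // mid-run), so its policy's failure stats can be updated now.
        static List<PolicySnapshot> inferFailed(List<PolicySnapshot> previouslyRegistered, Set<String> runningUuids) {
            return previouslyRegistered.stream()
                .filter(snapshot -> runningUuids.contains(snapshot.snapshotUuid()) == false)
                .toList();
        }
    }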
--- docs/changelog/110216.yaml | 5 + .../org/elasticsearch/TransportVersions.java | 1 + .../create/CreateSnapshotRequest.java | 45 +- .../elasticsearch/cluster/ClusterModule.java | 7 + .../snapshots/RegisteredPolicySnapshots.java | 288 +++++++ .../elasticsearch/snapshots/SnapshotId.java | 26 +- .../snapshots/SnapshotsService.java | 14 +- .../create/CreateSnapshotRequestTests.java | 1 + ...eredPolicySnapshotsSerializationTests.java | 113 +++ .../slm/SnapshotLifecyclePolicyMetadata.java | 5 + .../core/slm/SnapshotLifecycleStats.java | 8 + .../xpack/slm/SLMStatDisruptionIT.java | 749 ++++++++++++++++++ .../xpack/slm/SnapshotLifecycle.java | 6 + .../xpack/slm/SnapshotLifecycleTask.java | 129 ++- .../slm/SnapshotLifecyclePolicyTests.java | 2 +- .../xpack/slm/SnapshotLifecycleTaskTests.java | 279 ++++++- 16 files changed, 1623 insertions(+), 55 deletions(-) create mode 100644 docs/changelog/110216.yaml create mode 100644 server/src/main/java/org/elasticsearch/snapshots/RegisteredPolicySnapshots.java create mode 100644 server/src/test/java/org/elasticsearch/snapshots/RegisteredPolicySnapshotsSerializationTests.java create mode 100644 x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMStatDisruptionIT.java diff --git a/docs/changelog/110216.yaml b/docs/changelog/110216.yaml new file mode 100644 index 0000000000000..00ab20b230e2c --- /dev/null +++ b/docs/changelog/110216.yaml @@ -0,0 +1,5 @@ +pr: 110216 +summary: Register SLM run before snapshotting to save stats +area: ILM+SLM +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index cf501f5389408..53e6d04821717 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -183,6 +183,7 @@ static TransportVersion def(int id) { public static final TransportVersion FIX_VECTOR_SIMILARITY_INNER_HITS = def(8_713_00_0); public static final TransportVersion INDEX_REQUEST_UPDATE_BY_DOC_ORIGIN = def(8_714_00_0); public static final TransportVersion ESQL_ATTRIBUTE_CACHED_SERIALIZATION = def(8_715_00_0); + public static final TransportVersion REGISTER_SLM_STATS = def(8_716_00_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java index 2c460319e3d86..d17ad9674a19d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java @@ -17,6 +17,7 @@ import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -82,6 +83,9 @@ public class CreateSnapshotRequest extends MasterNodeRequest userMetadata; + @Nullable + private String uuid = null; + public CreateSnapshotRequest(TimeValue masterNodeTimeout) { super(masterNodeTimeout); } @@ -96,6 +100,7 @@ public CreateSnapshotRequest(TimeValue masterNodeTimeout, String repository, Str this(masterNodeTimeout); this.snapshot = snapshot; this.repository = repository; + this.uuid = UUIDs.randomBase64UUID(); } public CreateSnapshotRequest(StreamInput in) throws IOException { @@ -112,6 +117,7 @@ public CreateSnapshotRequest(StreamInput in) throws IOException { waitForCompletion = in.readBoolean(); partial = in.readBoolean(); userMetadata = in.readGenericMap(); + uuid = in.getTransportVersion().onOrAfter(TransportVersions.REGISTER_SLM_STATS) ? in.readOptionalString() : null; } @Override @@ -129,6 +135,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(waitForCompletion); out.writeBoolean(partial); out.writeGenericMap(userMetadata); + if (out.getTransportVersion().onOrAfter(TransportVersions.REGISTER_SLM_STATS)) { + out.writeOptionalString(uuid); + } } @Override @@ -364,6 +373,35 @@ public CreateSnapshotRequest userMetadata(@Nullable Map userMeta return this; } + /** + * Set a uuid to identify snapshot. + * If no uuid is specified, one will be created within SnapshotService + */ + public CreateSnapshotRequest uuid(String uuid) { + this.uuid = uuid; + return this; + } + + /** + * Get the uuid, generating it if one does not yet exist. + * Because the uuid can be set, this method is NOT thread-safe. + *
+ * The uuid was previously generated in SnapshotService.createSnapshot + * but was moved to the CreateSnapshotRequest constructor so that the caller could + * uniquely identify the snapshot. Unfortunately, in a mixed-version cluster, + * the CreateSnapshotRequest could be created on a node which does not yet + * generate the uuid in the constructor. In this case, the uuid + * must be generated when it is first accessed with this getter. + * + * @return the uuid that will be used for the snapshot + */ + public String uuid() { + if (this.uuid == null) { + this.uuid = UUIDs.randomBase64UUID(); + } + return this.uuid; + } + /** * @return Which plugin states should be included in the snapshot */ @@ -469,12 +507,13 @@ public boolean equals(Object o) { && Objects.equals(indicesOptions, that.indicesOptions) && Arrays.equals(featureStates, that.featureStates) && Objects.equals(masterNodeTimeout(), that.masterNodeTimeout()) - && Objects.equals(userMetadata, that.userMetadata); + && Objects.equals(userMetadata, that.userMetadata) + && Objects.equals(uuid, that.uuid); } @Override public int hashCode() { - int result = Objects.hash(snapshot, repository, indicesOptions, partial, includeGlobalState, waitForCompletion, userMetadata); + int result = Objects.hash(snapshot, repository, indicesOptions, partial, includeGlobalState, waitForCompletion, userMetadata, uuid); result = 31 * result + Arrays.hashCode(indices); result = 31 * result + Arrays.hashCode(featureStates); return result; @@ -505,6 +544,8 @@ public String toString() { + masterNodeTimeout() + ", metadata=" + userMetadata + + ", uuid=" + + uuid + '}'; } } diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java index 29933ad20ef10..a6e0577cf702c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -76,6 +76,7 @@ import org.elasticsearch.persistent.PersistentTasksNodeService; import org.elasticsearch.plugins.ClusterPlugin; import org.elasticsearch.script.ScriptMetadata; +import org.elasticsearch.snapshots.RegisteredPolicySnapshots; import org.elasticsearch.snapshots.SnapshotsInfoService; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskResultsService; @@ -234,6 +235,12 @@ public static List getNamedWriteables() { registerMetadataCustom(entries, NodesShutdownMetadata.TYPE, NodesShutdownMetadata::new, NodesShutdownMetadata::readDiffFrom); registerMetadataCustom(entries, FeatureMigrationResults.TYPE, FeatureMigrationResults::new, FeatureMigrationResults::readDiffFrom); registerMetadataCustom(entries, DesiredNodesMetadata.TYPE, DesiredNodesMetadata::new, DesiredNodesMetadata::readDiffFrom); + registerMetadataCustom( + entries, + RegisteredPolicySnapshots.TYPE, + RegisteredPolicySnapshots::new, + RegisteredPolicySnapshots.RegisteredSnapshotsDiff::new + ); // Task Status (not Diffable) entries.add(new Entry(Task.Status.class, PersistentTasksNodeService.Status.NAME, PersistentTasksNodeService.Status::new)); diff --git a/server/src/main/java/org/elasticsearch/snapshots/RegisteredPolicySnapshots.java b/server/src/main/java/org/elasticsearch/snapshots/RegisteredPolicySnapshots.java new file mode 100644 index 0000000000000..9eecf6e47aa90 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/snapshots/RegisteredPolicySnapshots.java @@ -0,0 +1,288 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.snapshots; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.NamedDiff; +import org.elasticsearch.cluster.SimpleDiffable; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Iterators; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.EnumSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.snapshots.SnapshotsService.POLICY_ID_METADATA_FIELD; + +/** + * {@link RegisteredPolicySnapshots} records a set of snapshot IDs along with their SLM policy name. It is used to infer + * the failure of snapshots which did not record their failure in SnapshotLifecycleStats. The set is stored in the + * cluster state as custom metadata. When a snapshot is started by SLM, it is added to this set. Upon completion, + * it is removed. If a snapshot does not record its failure in SnapshotLifecycleStats, likely due to a master shutdown, + * it will not be removed from the registered set. A subsequent snapshot will then find that a registered snapshot + * is no longer running and will infer that it failed, updating SnapshotLifecycleStats accordingly.
+ */ +public class RegisteredPolicySnapshots implements Metadata.Custom { + + public static final String TYPE = "registered_snapshots"; + private static final ParseField SNAPSHOTS = new ParseField("snapshots"); + public static final RegisteredPolicySnapshots EMPTY = new RegisteredPolicySnapshots(List.of()); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + TYPE, + a -> new RegisteredPolicySnapshots((List) a[0]) + ); + + static { + PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), (p, c) -> PolicySnapshot.parse(p), SNAPSHOTS); + } + + private final List snapshots; + + public RegisteredPolicySnapshots(List snapshots) { + this.snapshots = Collections.unmodifiableList(snapshots); + } + + public RegisteredPolicySnapshots(StreamInput in) throws IOException { + this.snapshots = in.readCollectionAsImmutableList(PolicySnapshot::new); + } + + public List getSnapshots() { + return snapshots; + } + + public boolean contains(SnapshotId snapshotId) { + return snapshots.stream().map(PolicySnapshot::getSnapshotId).anyMatch(snapshotId::equals); + } + + public List getSnapshotsByPolicy(String policy) { + return snapshots.stream().filter(s -> s.getPolicy().equals(policy)).map(PolicySnapshot::getSnapshotId).toList(); + } + + @Override + public EnumSet context() { + return Metadata.ALL_CONTEXTS; + } + + @Override + public Diff diff(Metadata.Custom previousState) { + return new RegisteredSnapshotsDiff((RegisteredPolicySnapshots) previousState, this); + } + + @Override + public String getWriteableName() { + return TYPE; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.REGISTER_SLM_STATS; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeCollection(snapshots); + } + + @Override + public Iterator toXContentChunked(ToXContent.Params ignored) { + return Iterators.concat(Iterators.single((builder, params) -> { + builder.field(SNAPSHOTS.getPreferredName(), snapshots); + return builder; + })); + } + + public static RegisteredPolicySnapshots parse(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public String toString() { + return "RegisteredSnapshots{" + "snapshots=" + snapshots + '}'; + } + + @Override + public int hashCode() { + return Objects.hash(snapshots); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + RegisteredPolicySnapshots other = (RegisteredPolicySnapshots) obj; + return Objects.equals(snapshots, other.snapshots); + } + + public static class RegisteredSnapshotsDiff implements NamedDiff { + final List snapshots; + + RegisteredSnapshotsDiff(RegisteredPolicySnapshots before, RegisteredPolicySnapshots after) { + this.snapshots = after.snapshots; + } + + public RegisteredSnapshotsDiff(StreamInput in) throws IOException { + this.snapshots = new RegisteredPolicySnapshots(in).snapshots; + } + + @Override + public Metadata.Custom apply(Metadata.Custom part) { + return new RegisteredPolicySnapshots(snapshots); + } + + @Override + public String getWriteableName() { + return TYPE; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeCollection(snapshots); + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.REGISTER_SLM_STATS; + } + } + + public Builder builder() { + return new Builder(this); 
+ } + + private static String getPolicyFromMetadata(Map userMetadata) { + if (userMetadata != null && userMetadata.get(POLICY_ID_METADATA_FIELD) instanceof String p) { + return p; + } + return null; + } + + public static class Builder { + final List snapshots; + + Builder(RegisteredPolicySnapshots registeredPolicySnapshots) { + this.snapshots = new ArrayList<>(registeredPolicySnapshots.snapshots); + } + + /** + * Add the snapshotId to the registered set if its metadata contains a policyId, meaning that it was initiated by SLM. + * @param userMetadata metadata provided by the user in the CreateSnapshotRequest + * If the request is from SLM it will contain a key "policy" with an SLM policy name as the value. + * @param snapshotId the snapshotId to potentially add to the registered set + */ + void maybeAdd(Map userMetadata, SnapshotId snapshotId) { + final String policy = getPolicyFromMetadata(userMetadata); + if (policy != null) { + snapshots.add(new PolicySnapshot(policy, snapshotId)); + } + } + + RegisteredPolicySnapshots build() { + return new RegisteredPolicySnapshots(snapshots); + } + } + + public static class PolicySnapshot implements SimpleDiffable, Writeable, ToXContentObject { + private final String policy; + private final SnapshotId snapshotId; + + private static final ParseField POLICY = new ParseField("policy"); + private static final ParseField SNAPSHOT_ID = new ParseField("snapshot_id"); + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "snapshot", + true, + (a, id) -> new PolicySnapshot((String) a[0], (SnapshotId) a[1]) + ); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), POLICY); + PARSER.declareObject(ConstructingObjectParser.constructorArg(), SnapshotId::parse, SNAPSHOT_ID); + } + + public PolicySnapshot(String policy, SnapshotId snapshotId) { + this.policy = policy; + this.snapshotId = snapshotId; + } + + public PolicySnapshot(StreamInput in) throws IOException { + this.policy = in.readString(); + this.snapshotId = new SnapshotId(in); + } + + public String getPolicy() { + return policy; + } + + public SnapshotId getSnapshotId() { + return snapshotId; + } + + public static PolicySnapshot parse(XContentParser parser) { + return PARSER.apply(parser, null); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(this.policy); + snapshotId.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(POLICY.getPreferredName(), this.policy); + builder.field(SNAPSHOT_ID.getPreferredName(), this.snapshotId); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(policy, snapshotId); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + PolicySnapshot other = (PolicySnapshot) obj; + return Objects.equals(policy, other.policy) && Objects.equals(snapshotId, other.snapshotId); + } + + @Override + public String toString() { + return Strings.toString(this); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotId.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotId.java index 8a67572040116..2863a6f713510 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotId.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotId.java @@ -11,8 +11,11 @@ 
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Objects; @@ -22,8 +25,19 @@ */ public final class SnapshotId implements Comparable, Writeable, ToXContentObject { - private static final String NAME = "name"; - private static final String UUID = "uuid"; + private static final ParseField NAME = new ParseField("name"); + private static final ParseField UUID = new ParseField("uuid"); + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "snapshot_id", + true, + a -> new SnapshotId((String) a[0], (String) a[1]) + ); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), NAME); + PARSER.declareString(ConstructingObjectParser.constructorArg(), UUID); + } private final String name; private final String uuid; @@ -54,6 +68,10 @@ public SnapshotId(final StreamInput in) throws IOException { hashCode = computeHashCode(); } + public static SnapshotId parse(XContentParser parser, String text) { + return PARSER.apply(parser, text); + } + /** * Returns snapshot name * @@ -112,8 +130,8 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field(NAME, name); - builder.field(UUID, uuid); + builder.field(NAME.getPreferredName(), name); + builder.field(UUID.getPreferredName(), uuid); builder.endObject(); return builder; } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 75b5a4e6a2ea6..6d7404d7472e5 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -277,9 +277,7 @@ public void createSnapshot(final CreateSnapshotRequest request, final ActionList final String repositoryName = request.repository(); final String snapshotName = IndexNameExpressionResolver.resolveDateMathExpression(request.snapshot()); validate(repositoryName, snapshotName); - // TODO: create snapshot UUID in CreateSnapshotRequest and make this operation idempotent to cleanly deal with transport layer - // retries - final SnapshotId snapshotId = new SnapshotId(snapshotName, UUIDs.randomBase64UUID()); // new UUID for the snapshot + final SnapshotId snapshotId = new SnapshotId(snapshotName, request.uuid()); Repository repository = repositoriesService.repository(request.repository()); if (repository.isReadOnly()) { listener.onFailure(new RepositoryException(repository.getMetadata().name(), "cannot create snapshot in a readonly repository")); @@ -3956,9 +3954,13 @@ public ClusterState execute(BatchExecutionContext batchExecutionCo ); final SnapshotsInProgress initialSnapshots = SnapshotsInProgress.get(state); SnapshotsInProgress snapshotsInProgress = shardsUpdateContext.computeUpdatedState(); + final RegisteredPolicySnapshots.Builder registeredPolicySnapshots = state.metadata() + .custom(RegisteredPolicySnapshots.TYPE, RegisteredPolicySnapshots.EMPTY) + .builder(); for (final var taskContext : 
batchExecutionContext.taskContexts()) { if (taskContext.getTask() instanceof CreateSnapshotTask task) { try { + registeredPolicySnapshots.maybeAdd(task.createSnapshotRequest.userMetadata(), task.snapshot.getSnapshotId()); final var repoMeta = RepositoriesMetadata.get(state).repository(task.snapshot.getRepository()); if (Objects.equals(task.initialRepositoryMetadata, repoMeta)) { snapshotsInProgress = createSnapshot(task, taskContext, state, snapshotsInProgress); @@ -3983,7 +3985,11 @@ public ClusterState execute(BatchExecutionContext batchExecutionCo if (snapshotsInProgress == initialSnapshots) { return state; } - return ClusterState.builder(state).putCustom(SnapshotsInProgress.TYPE, snapshotsInProgress).build(); + + return ClusterState.builder(state) + .putCustom(SnapshotsInProgress.TYPE, snapshotsInProgress) + .metadata(Metadata.builder(state.metadata()).putCustom(RegisteredPolicySnapshots.TYPE, registeredPolicySnapshots.build())) + .build(); } private SnapshotsInProgress createSnapshot( diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestTests.java index 2886ca7be4821..999f22b4dc400 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestTests.java @@ -113,6 +113,7 @@ public void testToXContent() throws IOException { ); processed.waitForCompletion(original.waitForCompletion()); processed.masterNodeTimeout(original.masterNodeTimeout()); + processed.uuid(original.uuid()); processed.source(map); assertEquals(original, processed); diff --git a/server/src/test/java/org/elasticsearch/snapshots/RegisteredPolicySnapshotsSerializationTests.java b/server/src/test/java/org/elasticsearch/snapshots/RegisteredPolicySnapshotsSerializationTests.java new file mode 100644 index 0000000000000..4de6b0c901f1a --- /dev/null +++ b/server/src/test/java/org/elasticsearch/snapshots/RegisteredPolicySnapshotsSerializationTests.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */
+
+package org.elasticsearch.snapshots;
+
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.test.AbstractChunkedSerializingTestCase;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class RegisteredPolicySnapshotsSerializationTests extends AbstractChunkedSerializingTestCase<RegisteredPolicySnapshots> {
+    public void testMaybeAdd() {
+        {
+            RegisteredPolicySnapshots.Builder builder = new RegisteredPolicySnapshots.Builder(RegisteredPolicySnapshots.EMPTY);
+            var snap = new SnapshotId(randomAlphaOfLength(10), randomUUID());
+
+            builder.maybeAdd(null, snap);
+            builder.maybeAdd(Map.of(), snap);
+            builder.maybeAdd(Map.of("not_policy", "policy-10"), snap);
+            builder.maybeAdd(Map.of(SnapshotsService.POLICY_ID_METADATA_FIELD, 5), snap);
+
+            // the immutable map from Map.of doesn't allow nulls
+            var meta = new HashMap<String, Object>();
+            meta.put(SnapshotsService.POLICY_ID_METADATA_FIELD, null);
+            builder.maybeAdd(meta, snap);
+
+            RegisteredPolicySnapshots registered = builder.build();
+            assertTrue(registered.getSnapshots().isEmpty());
+        }
+
+        {
+            RegisteredPolicySnapshots.Builder builder = new RegisteredPolicySnapshots.Builder(RegisteredPolicySnapshots.EMPTY);
+            var snap = new SnapshotId(randomAlphaOfLength(10), randomUUID());
+            builder.maybeAdd(Map.of(SnapshotsService.POLICY_ID_METADATA_FIELD, "cheddar"), snap);
+            RegisteredPolicySnapshots registered = builder.build();
+            assertEquals(List.of(new RegisteredPolicySnapshots.PolicySnapshot("cheddar", snap)), registered.getSnapshots());
+        }
+    }
+
+    @Override
+    protected RegisteredPolicySnapshots doParseInstance(XContentParser parser) throws IOException {
+        return RegisteredPolicySnapshots.parse(parser);
+    }
+
+    @Override
+    protected Writeable.Reader<RegisteredPolicySnapshots> instanceReader() {
+        return RegisteredPolicySnapshots::new;
+    }
+
+    @Override
+    protected RegisteredPolicySnapshots createTestInstance() {
+        return randomRegisteredPolicySnapshots();
+    }
+
+    @Override
+    protected RegisteredPolicySnapshots mutateInstance(RegisteredPolicySnapshots instance) throws IOException {
+        if (instance.getSnapshots().isEmpty()) {
+            return new RegisteredPolicySnapshots(List.of(randomPolicySnapshot()));
+        }
+
+        final int randIndex = between(0, instance.getSnapshots().size() - 1);
+        final RegisteredPolicySnapshots.PolicySnapshot policySnapshot = instance.getSnapshots().get(randIndex);
+
+        String policy = policySnapshot.getPolicy();
+        String snapshotName = policySnapshot.getSnapshotId().getName();
+        String snapshotUUID = policySnapshot.getSnapshotId().getUUID();
+
+        switch (between(0, 2)) {
+            case 0 -> {
+                policy = randomValueOtherThan(policy, this::randomPolicy);
+            }
+            case 1 -> {
+                snapshotName = randomValueOtherThan(snapshotName, ESTestCase::randomIdentifier);
+            }
+            case 2 -> {
+                snapshotUUID = randomValueOtherThan(snapshotUUID, ESTestCase::randomUUID);
+            }
+            default -> throw new AssertionError("failure, got illegal switch case");
+        }
+
+        List<RegisteredPolicySnapshots.PolicySnapshot> newSnapshots = new ArrayList<>(instance.getSnapshots());
+        newSnapshots.set(randIndex, new RegisteredPolicySnapshots.PolicySnapshot(policy, new SnapshotId(snapshotName, snapshotUUID)));
+        return new RegisteredPolicySnapshots(newSnapshots);
+    }
+
+    private RegisteredPolicySnapshots randomRegisteredPolicySnapshots() {
+        final List<RegisteredPolicySnapshots.PolicySnapshot> snapshots = new ArrayList<>();
+        for (int i = 0; i < randomIntBetween(0, 20); i++) {
+            snapshots.add(randomPolicySnapshot());
+        }
+        return new
RegisteredPolicySnapshots(snapshots); + } + + private String randomPolicy() { + return "policy-" + randomIntBetween(0, 20); + } + + private RegisteredPolicySnapshots.PolicySnapshot randomPolicySnapshot() { + SnapshotId snapshotId = new SnapshotId(randomIdentifier(), randomUUID()); + return new RegisteredPolicySnapshots.PolicySnapshot(randomPolicy(), snapshotId); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadata.java index daec14df094c4..0a97810fadacf 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadata.java @@ -277,6 +277,11 @@ public Builder setInvocationsSinceLastSuccess(long invocationsSinceLastSuccess) return this; } + public Builder incrementInvocationsSinceLastSuccess() { + this.invocationsSinceLastSuccess++; + return this; + } + public SnapshotLifecyclePolicyMetadata build() { return new SnapshotLifecyclePolicyMetadata( Objects.requireNonNull(policy), diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleStats.java index 427f1e58706eb..8dd0510d2b199 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleStats.java @@ -358,6 +358,14 @@ public String getPolicyId() { return policyId; } + public long getSnapshotTakenCount() { + return snapshotsTaken; + } + + public long getSnapshotFailedCount() { + return snapshotsFailed; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(policyId); diff --git a/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMStatDisruptionIT.java b/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMStatDisruptionIT.java new file mode 100644 index 0000000000000..d8b1f36c25e54 --- /dev/null +++ b/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMStatDisruptionIT.java @@ -0,0 +1,749 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ +package org.elasticsearch.xpack.slm; + +import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; +import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus; +import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.SnapshotsInProgress; +import org.elasticsearch.cluster.coordination.Coordinator; +import org.elasticsearch.cluster.coordination.FollowersChecker; +import org.elasticsearch.cluster.coordination.LagDetector; +import org.elasticsearch.cluster.coordination.LeaderChecker; +import org.elasticsearch.cluster.metadata.RepositoryMetadata; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.datastreams.DataStreamsPlugin; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; +import org.elasticsearch.indices.recovery.RecoverySettings; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.RepositoryPlugin; +import org.elasticsearch.repositories.FinalizeSnapshotContext; +import org.elasticsearch.repositories.RepositoriesMetrics; +import org.elasticsearch.repositories.Repository; +import org.elasticsearch.repositories.SnapshotShardContext; +import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase; +import org.elasticsearch.snapshots.RegisteredPolicySnapshots; +import org.elasticsearch.snapshots.SnapshotId; +import org.elasticsearch.snapshots.SnapshotInfo; +import org.elasticsearch.snapshots.SnapshotMissingException; +import org.elasticsearch.snapshots.SnapshotState; +import org.elasticsearch.snapshots.mockstore.MockRepository; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.disruption.NetworkDisruption; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.transport.TransportSettings; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; +import org.elasticsearch.xpack.core.ilm.LifecycleSettings; +import org.elasticsearch.xpack.core.slm.SnapshotLifecycleMetadata; +import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicy; +import org.elasticsearch.xpack.core.slm.SnapshotRetentionConfiguration; +import org.elasticsearch.xpack.core.slm.action.ExecuteSnapshotLifecycleAction; +import org.elasticsearch.xpack.core.slm.action.PutSnapshotLifecycleAction; +import org.elasticsearch.xpack.ilm.IndexLifecycle; +import org.junit.Before; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.hamcrest.Matchers.equalTo; + +/** + * Test that SLM stats can be lost due to master shutdown, + * and then recovered by 
registering them before snapshotting.
+ */
+@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0)
+public class SLMStatDisruptionIT extends AbstractSnapshotIntegTestCase {
+
+    private static final String NEVER_EXECUTE_CRON_SCHEDULE = "* * * 31 FEB ? *";
+
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return Arrays.asList(
+            MockRepository.Plugin.class,
+            MockTransportService.TestPlugin.class,
+            LocalStateCompositeXPackPlugin.class,
+            IndexLifecycle.class,
+            SnapshotLifecycle.class,
+            DataStreamsPlugin.class,
+            TestDelayedRepoPlugin.class,
+            TestRestartBeforeListenersRepoPlugin.class
+        );
+    }
+
+    @Override
+    protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
+        return Settings.builder()
+            .put(super.nodeSettings(nodeOrdinal, otherSettings))
+            .put(LifecycleSettings.LIFECYCLE_HISTORY_INDEX_ENABLED, false)
+            .put(DEFAULT_SETTINGS)
+            .build();
+    }
+
+    // copied from AbstractDisruptionTestCase.DEFAULT_SETTINGS
+    public static final Settings DEFAULT_SETTINGS = Settings.builder()
+        .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "5s") // for hitting simulated network failures quickly
+        .put(LeaderChecker.LEADER_CHECK_RETRY_COUNT_SETTING.getKey(), 1) // for hitting simulated network failures quickly
+        .put(FollowersChecker.FOLLOWER_CHECK_TIMEOUT_SETTING.getKey(), "5s") // for hitting simulated network failures quickly
+        .put(FollowersChecker.FOLLOWER_CHECK_RETRY_COUNT_SETTING.getKey(), 1) // for hitting simulated network failures quickly
+        .put(Coordinator.PUBLISH_TIMEOUT_SETTING.getKey(), "5s") // <-- for hitting simulated network failures quickly
+        .put(LagDetector.CLUSTER_FOLLOWER_LAG_TIMEOUT_SETTING.getKey(), "5s") // remove lagging nodes quickly so they can rejoin
+        .put(TransportSettings.CONNECT_TIMEOUT.getKey(), "10s") // Network delay disruption waits for the min between this
+        // value and the time of disruption and does not recover immediately
+        // when disruption is stopped. We should make sure we recover faster
+        // than the default of 30s, causing ensureGreen and friends to time out
+        .build();
+
+    public static class TestDelayedRepoPlugin extends Plugin implements RepositoryPlugin {
+
+        // Use static vars since instantiated by plugin system
+        private static final AtomicBoolean doDelay = new AtomicBoolean(true);
+        private static final CountDownLatch delayedRepoLatch = new CountDownLatch(1);
+
+        static void removeDelay() {
+            delayedRepoLatch.countDown();
+        }
+
+        @Override
+        public Map<String, Repository.Factory> getRepositories(
+            Environment env,
+            NamedXContentRegistry namedXContentRegistry,
+            ClusterService clusterService,
+            BigArrays bigArrays,
+            RecoverySettings recoverySettings,
+            RepositoriesMetrics repositoriesMetrics
+        ) {
+            return Map.of(
+                TestDelayedRepo.TYPE,
+                metadata -> new TestDelayedRepo(metadata, env, namedXContentRegistry, clusterService, bigArrays, recoverySettings, () -> {
+                    // Only delay the first request
+                    if (doDelay.getAndSet(false)) {
+                        try {
+                            assertTrue(delayedRepoLatch.await(1, TimeUnit.MINUTES));
+                        } catch (InterruptedException e) {
+                            throw new RuntimeException(e);
+                        }
+                    }
+                })
+            );
+        }
+    }
+
+    static class TestDelayedRepo extends FsRepository {
+        private static final String TYPE = "delayed";
+        private final Runnable delayFn;
+
+        protected TestDelayedRepo(
+            RepositoryMetadata metadata,
+            Environment env,
+            NamedXContentRegistry namedXContentRegistry,
+            ClusterService clusterService,
+            BigArrays bigArrays,
+            RecoverySettings recoverySettings,
+            Runnable delayFn
+        ) {
+            super(metadata, env, namedXContentRegistry, clusterService, bigArrays, recoverySettings);
+            this.delayFn = delayFn;
+        }
+
+        @Override
+        protected void snapshotFile(SnapshotShardContext context, BlobStoreIndexShardSnapshot.FileInfo fileInfo) throws IOException {
+            delayFn.run();
+            super.snapshotFile(context, fileInfo);
+        }
+    }
+
+    public static class TestRestartBeforeListenersRepoPlugin extends Plugin implements RepositoryPlugin {
+
+        // Use static vars since instantiated by plugin system
+        private static Runnable onResponse;
+
+        static void setOnResponse(Runnable onResponse) {
+            TestRestartBeforeListenersRepoPlugin.onResponse = onResponse;
+        }
+
+        static void clearOnResponse() {
+            onResponse = () -> {};
+        }
+
+        @Override
+        public Map<String, Repository.Factory> getRepositories(
+            Environment env,
+            NamedXContentRegistry namedXContentRegistry,
+            ClusterService clusterService,
+            BigArrays bigArrays,
+            RecoverySettings recoverySettings,
+            RepositoriesMetrics repositoriesMetrics
+        ) {
+            return Map.of(
+                TestRestartBeforeListenersRepo.TYPE,
+                metadata -> new TestRestartBeforeListenersRepo(
+                    metadata,
+                    env,
+                    namedXContentRegistry,
+                    clusterService,
+                    bigArrays,
+                    recoverySettings,
+                    // add a wrapper lambda so the underlying runnable can be changed with the static setOnResponse() method
+                    () -> onResponse.run()
+                )
+            );
+        }
+    }
+
+    /**
+     * Repo which forces a runnable to be run after snapshot finalization, but before callbacks
+     * which receive the SnapshotInfo, specifically the SLM callback which will save failure stats.
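+     * The runnable gives tests a window in which the snapshot already exists in the repository but SLM has not yet
+     * recorded its outcome in the cluster state, e.g. to restart the master node at exactly that point.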
+ */ + static class TestRestartBeforeListenersRepo extends FsRepository { + private static final String TYPE = "restart_before_listeners"; + + private final Runnable beforeResponseRunnable; + + protected TestRestartBeforeListenersRepo( + RepositoryMetadata metadata, + Environment env, + NamedXContentRegistry namedXContentRegistry, + ClusterService clusterService, + BigArrays bigArrays, + RecoverySettings recoverySettings, + Runnable beforeResponseRunnable + ) { + super(metadata, env, namedXContentRegistry, clusterService, bigArrays, recoverySettings); + this.beforeResponseRunnable = beforeResponseRunnable; + } + + @Override + public void finalizeSnapshot(FinalizeSnapshotContext fsc) { + var newFinalizeContext = new FinalizeSnapshotContext( + fsc.updatedShardGenerations(), + fsc.repositoryStateId(), + fsc.clusterMetadata(), + fsc.snapshotInfo(), + fsc.repositoryMetaVersion(), + fsc, + snapshotInfo -> { + // run the passed lambda before calling the usual callback + // this is where the cluster can be restarted before SLM is called back with the snapshotInfo + beforeResponseRunnable.run(); + fsc.onDone(snapshotInfo); + } + ); + super.finalizeSnapshot(newFinalizeContext); + } + } + + @Before + public void clearRepoFinalizeRunnable() { + TestRestartBeforeListenersRepoPlugin.clearOnResponse(); + } + + /** + * Test that if there is a currently running snapshot it is not inferred to be a failure + */ + public void testCurrentlyRunningSnapshotNotRecordedAsFailure() throws Exception { + final String idxName = "test-idx"; + final String repoName = "test-repo"; + final String policyName = "test-policy"; + + internalCluster().startMasterOnlyNodes(1); + final String masterNode = internalCluster().getMasterName(); + final String dataNode = internalCluster().startDataOnlyNode(); + ensureStableCluster(2); + + createRandomIndex(idxName, dataNode); + createRepository(repoName, TestDelayedRepo.TYPE); + createSnapshotPolicy(policyName, "snap", NEVER_EXECUTE_CRON_SCHEDULE, repoName, idxName); + + ensureGreen(); + + String snapshotA = executePolicy(masterNode, policyName); + logger.info("Created snapshot A: " + snapshotA); + + // wait until snapshotA is registered before starting snapshotB + assertBusy(() -> assertRegistered(policyName, List.of(snapshotA)), 1, TimeUnit.MINUTES); + + // create another snapshot while A is still running + String snapshotB = executePolicy(masterNode, policyName); + logger.info("Created snapshot B: " + snapshotB); + + // wait until both snapshots are registered before allowing snapshotA to continue + assertBusy(() -> assertRegistered(policyName, List.of(snapshotA, snapshotB)), 1, TimeUnit.MINUTES); + + // remove delay from snapshotA allowing it to finish + TestDelayedRepoPlugin.removeDelay(); + + waitForSnapshot(repoName, snapshotA); + waitForSnapshot(repoName, snapshotB); + + assertBusy(() -> { + assertSnapshotSuccess(repoName, snapshotA); + assertSnapshotSuccess(repoName, snapshotB); + assertMetadata(policyName, 2, 0, 0); + assertRegistered(policyName, List.of()); + + }, 1, TimeUnit.MINUTES); + } + + /** + * Test that after successful snapshot registered is empty + */ + public void testSuccessSnapshot() throws Exception { + final String idxName = "test-idx"; + final String repoName = "test-repo"; + final String policyName = "test-policy"; + + internalCluster().startMasterOnlyNodes(1); + final String masterNode = internalCluster().getMasterName(); + final String dataNode = internalCluster().startDataOnlyNode(); + ensureStableCluster(2); + + createRandomIndex(idxName, dataNode); + 
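+        // TestRestartBeforeListenersRepo behaves like a plain fs repository here: clearRepoFinalizeRunnable() reset the hook to a no-op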
createRepository(repoName, TestRestartBeforeListenersRepo.TYPE); + createSnapshotPolicy(policyName, "snap", NEVER_EXECUTE_CRON_SCHEDULE, repoName, idxName); + + ensureGreen(); + + String snapshotName = executePolicy(masterNode, policyName); + logger.info("Created snapshot: " + snapshotName); + + waitForSnapshot(repoName, snapshotName); + + assertBusy(() -> { + assertSnapshotSuccess(repoName, snapshotName); + assertMetadata(policyName, 1, 0, 0); + assertRegistered(policyName, List.of()); + }, 1, TimeUnit.MINUTES); + } + + /** + * Test that after a failure which fails stats uploads, then a success, + * registered snapshot from failure is added to invocationsSinceLastSuccess. + */ + public void testFailSnapshotFailStatsThenSuccessRecoverStats() throws Exception { + final String idxName = "test-idx"; + final String repoName = "test-repo"; + final String policyName = "test-policy"; + + internalCluster().startMasterOnlyNodes(1); + final String masterNode = internalCluster().getMasterName(); + final String dataNode = internalCluster().startDataOnlyNode(); + ensureStableCluster(2); + + // Add network disruption so snapshot fails with PARTIAL status + NetworkDisruption networkDisruption = isolateMasterDisruption(NetworkDisruption.DISCONNECT); + internalCluster().setDisruptionScheme(networkDisruption); + + // wire into repo so code can be run on test thread after snapshot finalize, but before SLM is called back + var runDuringFinalize = new RunDuringFinalize(); + TestRestartBeforeListenersRepoPlugin.setOnResponse(runDuringFinalize.finalizeThreadRunnable()); + + createRandomIndex(idxName, dataNode); + createRepository(repoName, TestRestartBeforeListenersRepo.TYPE); + createSnapshotPolicy(policyName, "snap", NEVER_EXECUTE_CRON_SCHEDULE, repoName, idxName); + + ensureGreen(); + + networkDisruption.startDisrupting(); + String snapshotName = executePolicy(masterNode, policyName); + logger.info("Created snapshot: " + snapshotName); + + // restart snapshot after snapshot finalize, but before SLM callback called + runDuringFinalize.awaitAndRun(() -> { + try { + internalCluster().restartNode(masterNode); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + + assertBusy(() -> { + assertSnapshotPartial(repoName, snapshotName); + assertMetadata(policyName, 0, 0, 0); + assertRegistered(policyName, List.of(snapshotName)); + }, 1, TimeUnit.MINUTES); + + awaitNoMoreRunningOperations(); + ensureGreen(); + + // Now execute again, and succeed. The failure from the previous run will now be recorded. + + TestRestartBeforeListenersRepoPlugin.clearOnResponse(); + + final String snapshotName2 = executePolicy(masterNode, policyName); + assertNotEquals(snapshotName, snapshotName2); + logger.info("Created snapshot: " + snapshotName2); + + waitForSnapshot(repoName, snapshotName2); + + assertBusy(() -> { + assertSnapshotSuccess(repoName, snapshotName2); + // Check stats, this time past failure should be accounted for + assertMetadata(policyName, 1, 1, 0); + assertRegistered(policyName, List.of()); + }, 1, TimeUnit.MINUTES); + } + + /** + * Test that after a failure then a failure that successfully sets stats + * registeredRuns from failure is added to invocationsSinceLastSuccess. 
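+     * In this variant the second run also ends in failure, so both the inferred failure and the new failure are counted.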
+ */ + public void testFailSnapshotFailStatsRecoverStats() throws Exception { + final String idxName = "test-idx"; + final String repoName = "test-repo"; + final String policyName = "test-policy"; + + internalCluster().startMasterOnlyNodes(1); + final String masterNode = internalCluster().getMasterName(); + final String dataNode = internalCluster().startDataOnlyNode(); + ensureStableCluster(2); + + // Add network disruption so snapshot fails with PARTIAL status + NetworkDisruption networkDisruption = isolateMasterDisruption(NetworkDisruption.DISCONNECT); + internalCluster().setDisruptionScheme(networkDisruption); + + // wire into repo so code can be run on test thread after snapshot finalize, but before SLM is called back + var runDuringFinalize = new RunDuringFinalize(); + TestRestartBeforeListenersRepoPlugin.setOnResponse(runDuringFinalize.finalizeThreadRunnable()); + + createRandomIndex(idxName, dataNode); + createRepository(repoName, TestRestartBeforeListenersRepo.TYPE); + createSnapshotPolicy(policyName, "snap", NEVER_EXECUTE_CRON_SCHEDULE, repoName, idxName); + + awaitNoMoreRunningOperations(); + ensureGreen(); + + networkDisruption.startDisrupting(); + String snapshotName = executePolicy(masterNode, policyName); + logger.info("Created snapshot: " + snapshotName); + + // restart snapshot after snapshot finalize, but before SLM callback called + runDuringFinalize.awaitAndRun(() -> { + try { + internalCluster().restartNode(masterNode); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + + assertBusy(() -> { + assertSnapshotPartial(repoName, snapshotName); + assertMetadata(policyName, 0, 0, 0); + assertRegistered(policyName, List.of(snapshotName)); + }, 1, TimeUnit.MINUTES); + + awaitNoMoreRunningOperations(); + ensureGreen(); + + // Now execute again, but don't fail the stat upload. The failure from the previous run will now be recorded. + var runDuringFinalize2 = new RunDuringFinalize(); + TestRestartBeforeListenersRepoPlugin.setOnResponse(runDuringFinalize2.finalizeThreadRunnable()); + + networkDisruption.startDisrupting(); + final String snapshotName2 = executePolicy(masterNode, policyName); + assertNotEquals(snapshotName, snapshotName2); + logger.info("Created snapshot: " + snapshotName2); + + runDuringFinalize2.awaitAndRun(networkDisruption::stopDisrupting); + + assertBusy(() -> { + assertSnapshotPartial(repoName, snapshotName2); + // Check metadata, this time past failure should be accounted for + assertMetadata(policyName, 0, 2, 2); + assertRegistered(policyName, List.of()); + }, 1, TimeUnit.MINUTES); + } + + /** + * Test that after a failed snapshot with a master restart during stat upload, update of invocationsSinceLastSuccess is lost. 
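+     * The snapshot stays in the registered set, so a later run for this policy can still reconcile it as a failure.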
+ */ + public void testFailedSnapshotFailStats() throws Exception { + final String idxName = "test-idx"; + final String repoName = "test-repo"; + final String policyName = "test-policy"; + + internalCluster().startMasterOnlyNodes(1); + final String masterNode = internalCluster().getMasterName(); + final String dataNode = internalCluster().startDataOnlyNode(); + ensureStableCluster(2); + + // Add network disruption so snapshot fails with PARTIAL status + NetworkDisruption networkDisruption = isolateMasterDisruption(NetworkDisruption.DISCONNECT); + internalCluster().setDisruptionScheme(networkDisruption); + + // wire into repo so code can be run on test thread after snapshot finalize, but before SLM is called back + var runDuringFinalize = new RunDuringFinalize(); + TestRestartBeforeListenersRepoPlugin.setOnResponse(runDuringFinalize.finalizeThreadRunnable()); + + createRandomIndex(idxName, dataNode); + createRepository(repoName, TestRestartBeforeListenersRepo.TYPE); + createSnapshotPolicy(policyName, "snap", NEVER_EXECUTE_CRON_SCHEDULE, repoName, idxName); + + ensureGreen(); + + networkDisruption.startDisrupting(); + String snapshotName = executePolicy(masterNode, policyName); + + // restart snapshot after snapshot finalize, but before SLM callback called + runDuringFinalize.awaitAndRun(() -> { + try { + internalCluster().restartNode(masterNode); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + + assertBusy(() -> { + assertSnapshotPartial(repoName, snapshotName); + assertMetadata(policyName, 0, 0, 0); + }, 1, TimeUnit.MINUTES); + } + + /** + * Confirm normal behavior during failure that successfully sets stats in cluster state. + */ + public void testFailedSnapshotSubmitStats() throws Exception { + final String idxName = "test-idx"; + final String repoName = "test-repo"; + final String policyName = "test-policy"; + + internalCluster().startMasterOnlyNodes(1); + final String masterNode = internalCluster().getMasterName(); + final String dataNode = internalCluster().startDataOnlyNode(); + ensureStableCluster(2); + + // Add network disruption so snapshot fails with PARTIAL status + NetworkDisruption networkDisruption = isolateMasterDisruption(NetworkDisruption.DISCONNECT); + internalCluster().setDisruptionScheme(networkDisruption); + + // wire into repo so code can be run on test thread after snapshot finalize, but before SLM is called back + var runDuringFinalize = new RunDuringFinalize(); + TestRestartBeforeListenersRepoPlugin.setOnResponse(runDuringFinalize.finalizeThreadRunnable()); + + createRandomIndex(idxName, dataNode); + createRepository(repoName, TestRestartBeforeListenersRepo.TYPE); + createSnapshotPolicy(policyName, "snap", NEVER_EXECUTE_CRON_SCHEDULE, repoName, idxName); + + ensureGreen(); + + networkDisruption.startDisrupting(); + String snapshotName = executePolicy(masterNode, policyName); + + // wait for snapshot to complete and network disruption to stop + runDuringFinalize.awaitAndRun(networkDisruption::stopDisrupting); + + assertBusy(() -> { + assertSnapshotPartial(repoName, snapshotName); + assertMetadata(policyName, 0, 1, 1); + }, 1, TimeUnit.MINUTES); + } + + private void assertMetadata(String policyName, long taken, long failure, long invocationsSinceLastSuccess) { + var snapshotLifecycleMetadata = getSnapshotLifecycleMetadata(); + var snapshotLifecyclePolicyMetadata = snapshotLifecycleMetadata.getSnapshotConfigurations().get(policyName); + assertStats(snapshotLifecycleMetadata, policyName, taken, failure); + if (taken > 0) { + 
assertNotNull(snapshotLifecyclePolicyMetadata.getLastSuccess());
+        } else {
+            assertNull(snapshotLifecyclePolicyMetadata.getLastSuccess());
+        }
+        if (failure > 0) {
+            assertNotNull(snapshotLifecyclePolicyMetadata.getLastFailure());
+        } else {
+            assertNull(snapshotLifecyclePolicyMetadata.getLastFailure());
+        }
+        assertEquals(invocationsSinceLastSuccess, snapshotLifecyclePolicyMetadata.getInvocationsSinceLastSuccess());
+    }
+
+    private SnapshotLifecycleMetadata getSnapshotLifecycleMetadata() {
+        final ClusterStateResponse clusterStateResponse = client().admin().cluster().state(new ClusterStateRequest()).actionGet();
+        ClusterState state = clusterStateResponse.getState();
+        return state.metadata().custom(SnapshotLifecycleMetadata.TYPE);
+    }
+
+    private RegisteredPolicySnapshots getRegisteredSnapshots() {
+        final ClusterStateResponse clusterStateResponse = client().admin().cluster().state(new ClusterStateRequest()).actionGet();
+        ClusterState state = clusterStateResponse.getState();
+        return state.metadata().custom(RegisteredPolicySnapshots.TYPE, RegisteredPolicySnapshots.EMPTY);
+    }
+
+    private SnapshotInfo getSnapshotInfo(String repository, String snapshot) {
+        GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repository)
+            .setSnapshots(snapshot)
+            .get();
+        return snapshotsStatusResponse.getSnapshots().get(0);
+    }
+
+    private SnapshotsStatusResponse getSnapshotStatus(String repo, String snapshotName) {
+        return clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repo).setSnapshots(snapshotName).get();
+    }
+
+    private void assertSnapshotSuccess(String repository, String snapshot) {
+        try {
+            SnapshotInfo snapshotInfo = getSnapshotInfo(repository, snapshot);
+            assertEquals(SnapshotState.SUCCESS, snapshotInfo.state());
+            assertEquals(1, snapshotInfo.successfulShards());
+            assertEquals(0, snapshotInfo.failedShards());
+            logger.info("Checked snapshot exists and is state SUCCESS");
+        } catch (SnapshotMissingException e) {
+            fail("expected a snapshot with name " + snapshot + " but it does not yet exist");
+        }
+    }
+
+    private void assertSnapshotPartial(String repository, String snapshot) {
+        try {
+            SnapshotInfo snapshotInfo = getSnapshotInfo(repository, snapshot);
+            assertEquals(SnapshotState.PARTIAL, snapshotInfo.state());
+            assertEquals(0, snapshotInfo.successfulShards());
+            assertEquals(1, snapshotInfo.failedShards());
+            logger.info("Checked snapshot exists and is state PARTIAL");
+        } catch (SnapshotMissingException e) {
+            fail("expected a snapshot with name " + snapshot + " but it does not yet exist");
+        }
+    }
+
+    private void assertStats(SnapshotLifecycleMetadata snapshotLifecycleMetadata, String policyName, long taken, long failed) {
+        var stats = snapshotLifecycleMetadata.getStats().getMetrics().get(policyName);
+        logger.info("stats: " + stats);
+        if (taken == 0 && failed == 0) {
+            assertTrue(stats == null || (stats.getSnapshotTakenCount() == 0 && stats.getSnapshotFailedCount() == 0));
+        } else {
+            assertNotNull(stats);
+            assertEquals(taken, stats.getSnapshotTakenCount());
+            assertEquals(failed, stats.getSnapshotFailedCount());
+        }
+    }
+
+    private void assertRegistered(String policyName, List<String> expected) {
+        var registered = getRegisteredSnapshots();
+        var policySnaps = registered.getSnapshotsByPolicy(policyName).stream().map(SnapshotId::getName).toList();
+        assertEquals(expected, policySnaps);
+    }
+
+    private void createRandomIndex(String idxName, String dataNodeName) throws InterruptedException {
+        Settings settings =
indexSettings(1, 0).put("index.routing.allocation.require._name", dataNodeName).build(); + createIndex(idxName, settings); + + logger.info("--> indexing some data"); + final int numdocs = randomIntBetween(10, 100); + IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs]; + for (int i = 0; i < builders.length; i++) { + builders[i] = prepareIndex(idxName).setId(Integer.toString(i)).setSource("field1", "bar " + i); + } + indexRandom(true, builders); + indicesAdmin().refresh(new RefreshRequest(idxName)).actionGet(); + } + + private void createSnapshotPolicy(String policyName, String snapshotNamePattern, String schedule, String repoId, String indexPattern) { + Map snapConfig = new HashMap<>(); + snapConfig.put("indices", Collections.singletonList(indexPattern)); + snapConfig.put("ignore_unavailable", false); + snapConfig.put("partial", true); + + SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy( + policyName, + snapshotNamePattern, + schedule, + repoId, + snapConfig, + SnapshotRetentionConfiguration.EMPTY + ); + + PutSnapshotLifecycleAction.Request putLifecycle = new PutSnapshotLifecycleAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + policyName, + policy + ); + try { + client().execute(PutSnapshotLifecycleAction.INSTANCE, putLifecycle).get(); + } catch (Exception e) { + logger.error("failed to create slm policy", e); + fail("failed to create policy " + policy + " got: " + e); + } + } + + /** + * Execute the given policy and return the generated snapshot name + */ + private String executePolicy(String node, String policyId) throws ExecutionException, InterruptedException { + ExecuteSnapshotLifecycleAction.Request executeReq = new ExecuteSnapshotLifecycleAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + policyId + ); + ExecuteSnapshotLifecycleAction.Response resp = client(node).execute(ExecuteSnapshotLifecycleAction.INSTANCE, executeReq).get(); + return resp.getSnapshotName(); + } + + private void waitForSnapshot(String repo, String snapshotName) throws Exception { + assertBusy(() -> { + try { + SnapshotsStatusResponse s = getSnapshotStatus(repo, snapshotName); + assertThat("expected a snapshot but none were returned", s.getSnapshots().size(), equalTo(1)); + SnapshotStatus status = s.getSnapshots().get(0); + logger.info("--> waiting for snapshot {} to be completed, got: {}", snapshotName, status.getState()); + assertThat(status.getState(), equalTo(SnapshotsInProgress.State.SUCCESS)); + } catch (SnapshotMissingException e) { + logger.error("expected a snapshot but it was missing", e); + fail("expected a snapshot with name " + snapshotName + " but it does not exist"); + } + }); + } + + /** + * The purpose of this class is to allow a cluster restart to occur after a snapshot has been finalized, but before the SLM callback + * which processes snapshotInfo has been called. TestRestartBeforeListenersRepo allows a runnable to be called at this time. But, + * internalCluster().restartNode() cannot be called on the thread running finalize. It is likely missing necessary context, or simply + * cannot be called by a thread on the master node. This helper class uses latches to run a callback on the test thread while the + * master node is waiting between finalizing the snapshot and before calling the SLM callback. 
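+     * See finalizeThreadRunnable() and awaitAndRun(Runnable) below for the latch handshake between the two threads.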
+     */
+    static class RunDuringFinalize {
+        private final CountDownLatch latch1 = new CountDownLatch(1);
+        private final CountDownLatch latch2 = new CountDownLatch(1);
+
+        Runnable finalizeThreadRunnable() {
+            return () -> {
+                latch1.countDown();
+                try {
+                    // While this is waiting, the runnable passed to awaitAndRun will be called by another thread.
+                    // The runnable is usually a cluster restart. During a restart, 10s is waited before this waiting thread is preempted.
+                    // To keep the tests fast, this wait is kept to 1s. This is actually a race condition which could cause a test to fail,
+                    // as the SLM callback could be called. But, after 1s, the restart should have started so this is unlikely.
+                    latch2.await(1, TimeUnit.SECONDS);
+                } catch (InterruptedException e) {
+                    throw new RuntimeException(e);
+                }
+            };
+        }
+
+        void awaitAndRun(Runnable runnable) throws InterruptedException {
+            assertTrue(latch1.await(1, TimeUnit.MINUTES));
+            // this is where the cluster restart occurs
+            runnable.run();
+            latch2.countDown();
+        }
+    }
+}
diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java
index cb0344dd70ad5..0d79ecf31670c 100644
--- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java
+++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java
@@ -32,6 +32,7 @@
 import org.elasticsearch.reservedstate.ReservedClusterStateHandler;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestHandler;
+import org.elasticsearch.snapshots.RegisteredPolicySnapshots;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.xcontent.NamedXContentRegistry;
 import org.elasticsearch.xcontent.ParseField;
@@ -169,6 +170,11 @@ private static List<NamedXContentRegistry.Entry> xContentEntries() {
             Metadata.Custom.class,
             new ParseField(SnapshotLifecycleMetadata.TYPE),
             parser -> SnapshotLifecycleMetadata.PARSER.parse(parser, null)
+        ),
+        new NamedXContentRegistry.Entry(
+            Metadata.Custom.class,
+            new ParseField(RegisteredPolicySnapshots.TYPE),
+            RegisteredPolicySnapshots::parse
         )
     );
 }
diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java
index ac58771936019..adf011e0ade37 100644
--- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java
+++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java
@@ -16,13 +16,17 @@
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ClusterStateUpdateTask;
+import org.elasticsearch.cluster.SnapshotsInProgress;
 import org.elasticsearch.cluster.metadata.Metadata;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.scheduler.SchedulerEngine;
 import org.elasticsearch.core.SuppressForbidden;
 import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.snapshots.RegisteredPolicySnapshots;
+import org.elasticsearch.snapshots.RegisteredPolicySnapshots.PolicySnapshot;
 import org.elasticsearch.snapshots.SnapshotException;
+import org.elasticsearch.snapshots.SnapshotId;
 import org.elasticsearch.snapshots.SnapshotInfo;
 import org.elasticsearch.xcontent.ToXContent;
 import org.elasticsearch.xpack.core.ClientHelper;
@@ -36,9 +40,14 @@
 import
java.io.IOException; import java.time.Instant; +import java.util.ArrayList; import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Optional; +import java.util.Set; import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.core.ilm.LifecycleOperationMetadata.currentSLMMode; @@ -93,6 +102,8 @@ public static Optional maybeTakeSnapshot( String snapshotName = maybeMetadata.map(policyMetadata -> { // don't time out on this request to not produce failed SLM runs in case of a temporarily slow master node CreateSnapshotRequest request = policyMetadata.getPolicy().toRequest(TimeValue.MAX_VALUE); + final SnapshotId snapshotId = new SnapshotId(request.snapshot(), request.uuid()); + final LifecyclePolicySecurityClient clientWithHeaders = new LifecyclePolicySecurityClient( client, ClientHelper.INDEX_LIFECYCLE_ORIGIN, @@ -120,7 +131,7 @@ public void onResponse(CreateSnapshotResponse createSnapshotResponse) { submitUnbatchedTask( clusterService, "slm-record-success-" + policyMetadata.getPolicy().getId(), - WriteJobStatus.success(policyMetadata.getPolicy().getId(), request.snapshot(), snapshotStartTime, timestamp) + WriteJobStatus.success(policyMetadata.getPolicy().getId(), snapshotId, snapshotStartTime, timestamp) ); historyStore.putAsync( SnapshotHistoryItem.creationSuccessRecord(timestamp, policyMetadata.getPolicy(), request.snapshot()) @@ -145,7 +156,7 @@ public void onFailure(Exception e) { submitUnbatchedTask( clusterService, "slm-record-failure-" + policyMetadata.getPolicy().getId(), - WriteJobStatus.failure(policyMetadata.getPolicy().getId(), request.snapshot(), timestamp, e) + WriteJobStatus.failure(policyMetadata.getPolicy().getId(), snapshotId, timestamp, e) ); final SnapshotHistoryItem failureRecord; try { @@ -157,7 +168,8 @@ public void onFailure(Exception e) { ); historyStore.putAsync(failureRecord); } catch (IOException ex) { - // This shouldn't happen unless there's an issue with serializing the original exception, which shouldn't happen + // This shouldn't happen unless there's an issue with serializing the original exception, which + // shouldn't happen logger.error( () -> format( "failed to record snapshot creation failure for snapshot lifecycle policy [%s]", @@ -204,53 +216,65 @@ public static String exceptionToString(Exception ex) { }, ToXContent.EMPTY_PARAMS); } + static Set currentlyRunningSnapshots(ClusterState clusterState) { + final SnapshotsInProgress snapshots = clusterState.custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY); + final Set currentlyRunning = new HashSet<>(); + for (final List entriesForRepo : snapshots.entriesByRepo()) { + for (SnapshotsInProgress.Entry entry : entriesForRepo) { + currentlyRunning.add(entry.snapshot().getSnapshotId()); + } + } + return currentlyRunning; + } + + static SnapshotInvocationRecord buildFailedSnapshotRecord(SnapshotId snapshot) { + return new SnapshotInvocationRecord( + snapshot.getName(), + null, + Instant.now().toEpochMilli(), + String.format(Locale.ROOT, "found registered snapshot [%s] which is no longer running, assuming failed.", snapshot.getName()) + ); + } + /** * A cluster state update task to write the result of a snapshot job to the cluster metadata for the associated policy. 
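+     * It also reconciles the registered snapshot set for the policy: registered entries that are no longer running
+     * are inferred to have failed and counted in the policy's failure stats, while entries registered by other
+     * still-defined policies are left for those policies' own runs to reconcile.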
*/ - private static class WriteJobStatus extends ClusterStateUpdateTask { + static class WriteJobStatus extends ClusterStateUpdateTask { private final String policyName; - private final String snapshotName; + private final SnapshotId snapshotId; private final long snapshotStartTime; private final long snapshotFinishTime; private final Optional exception; private WriteJobStatus( String policyName, - String snapshotName, + SnapshotId snapshotId, long snapshotStartTime, long snapshotFinishTime, Optional exception ) { this.policyName = policyName; - this.snapshotName = snapshotName; + this.snapshotId = snapshotId; this.exception = exception; this.snapshotStartTime = snapshotStartTime; this.snapshotFinishTime = snapshotFinishTime; } - static WriteJobStatus success(String policyId, String snapshotName, long snapshotStartTime, long snapshotFinishTime) { - return new WriteJobStatus(policyId, snapshotName, snapshotStartTime, snapshotFinishTime, Optional.empty()); + static WriteJobStatus success(String policyId, SnapshotId snapshotId, long snapshotStartTime, long snapshotFinishTime) { + return new WriteJobStatus(policyId, snapshotId, snapshotStartTime, snapshotFinishTime, Optional.empty()); } - static WriteJobStatus failure(String policyId, String snapshotName, long timestamp, Exception exception) { - return new WriteJobStatus(policyId, snapshotName, timestamp, timestamp, Optional.of(exception)); + static WriteJobStatus failure(String policyId, SnapshotId snapshotId, long timestamp, Exception exception) { + return new WriteJobStatus(policyId, snapshotId, timestamp, timestamp, Optional.of(exception)); } @Override public ClusterState execute(ClusterState currentState) throws Exception { - SnapshotLifecycleMetadata snapMeta = currentState.metadata().custom(SnapshotLifecycleMetadata.TYPE); - - assert snapMeta != null : "this should never be called while the snapshot lifecycle cluster metadata is null"; - if (snapMeta == null) { - logger.error( - "failed to record snapshot [{}] for snapshot [{}] in policy [{}]: snapshot lifecycle metadata is null", - exception.isPresent() ? "failure" : "success", - snapshotName, - policyName - ); - return currentState; - } + SnapshotLifecycleMetadata snapMeta = currentState.metadata() + .custom(SnapshotLifecycleMetadata.TYPE, SnapshotLifecycleMetadata.EMPTY); + RegisteredPolicySnapshots registeredSnapshots = currentState.metadata() + .custom(RegisteredPolicySnapshots.TYPE, RegisteredPolicySnapshots.EMPTY); Map snapLifecycles = new HashMap<>(snapMeta.getSnapshotConfigurations()); SnapshotLifecyclePolicyMetadata policyMetadata = snapLifecycles.get(policyName); @@ -258,30 +282,62 @@ public ClusterState execute(ClusterState currentState) throws Exception { logger.warn( "failed to record snapshot [{}] for snapshot [{}] in policy [{}]: policy not found", exception.isPresent() ? "failure" : "success", - snapshotName, + snapshotId.getName(), policyName ); return currentState; } - SnapshotLifecyclePolicyMetadata.Builder newPolicyMetadata = SnapshotLifecyclePolicyMetadata.builder(policyMetadata); - final SnapshotLifecycleStats stats = snapMeta.getStats(); + final SnapshotLifecyclePolicyMetadata.Builder newPolicyMetadata = SnapshotLifecyclePolicyMetadata.builder(policyMetadata); + SnapshotLifecycleStats newStats = snapMeta.getStats(); + + if (registeredSnapshots.contains(snapshotId) == false) { + logger.warn( + "Snapshot [{}] not found in registered set after snapshot completion. 
This means snapshot was" + + " recorded as a failure by another snapshot's cleanup run.", + snapshotId.getName() + ); + } - SnapshotLifecycleStats newStats; + final Set runningSnapshots = currentlyRunningSnapshots(currentState); + final List newRegistered = new ArrayList<>(); + for (PolicySnapshot snapshot : registeredSnapshots.getSnapshots()) { + if (snapshot.getSnapshotId().equals(snapshotId) == false) { + if (snapshot.getPolicy().equals(policyName)) { + if (runningSnapshots.contains(snapshot.getSnapshotId())) { + // Snapshot is for this policy and is still running so keep it in registered set + newRegistered.add(snapshot); + } else { + // Snapshot is for this policy but is not running so infer failure, update stats accordingly, + // and remove from registered set + newStats = newStats.withFailedIncremented(policyName); + newPolicyMetadata.incrementInvocationsSinceLastSuccess() + .setLastFailure(buildFailedSnapshotRecord(snapshot.getSnapshotId())); + } + } else if (snapLifecycles.containsKey(snapshot.getPolicy())) { + // Snapshot is for another policy so keep in the registered set and that policy deal with it + newRegistered.add(snapshot); + } + } + } + + // Add stats from the just completed snapshot execution if (exception.isPresent()) { - newStats = stats.withFailedIncremented(policyName); + newStats = newStats.withFailedIncremented(policyName); newPolicyMetadata.setLastFailure( new SnapshotInvocationRecord( - snapshotName, + snapshotId.getName(), null, snapshotFinishTime, exception.map(SnapshotLifecycleTask::exceptionToString).orElse(null) ) ); - newPolicyMetadata.setInvocationsSinceLastSuccess(policyMetadata.getInvocationsSinceLastSuccess() + 1L); + newPolicyMetadata.incrementInvocationsSinceLastSuccess(); } else { - newStats = stats.withTakenIncremented(policyName); - newPolicyMetadata.setLastSuccess(new SnapshotInvocationRecord(snapshotName, snapshotStartTime, snapshotFinishTime, null)); + newStats = newStats.withTakenIncremented(policyName); + newPolicyMetadata.setLastSuccess( + new SnapshotInvocationRecord(snapshotId.getName(), snapshotStartTime, snapshotFinishTime, null) + ); newPolicyMetadata.setInvocationsSinceLastSuccess(0L); } @@ -291,10 +347,11 @@ public ClusterState execute(ClusterState currentState) throws Exception { currentSLMMode(currentState), newStats ); - Metadata currentMeta = currentState.metadata(); - return ClusterState.builder(currentState) - .metadata(Metadata.builder(currentMeta).putCustom(SnapshotLifecycleMetadata.TYPE, lifecycleMetadata)) + Metadata newMeta = Metadata.builder(currentState.metadata()) + .putCustom(SnapshotLifecycleMetadata.TYPE, lifecycleMetadata) + .putCustom(RegisteredPolicySnapshots.TYPE, new RegisteredPolicySnapshots(newRegistered)) .build(); + return ClusterState.builder(currentState).metadata(newMeta).build(); } @Override @@ -302,7 +359,7 @@ public void onFailure(Exception e) { logger.error( "failed to record snapshot policy execution status [{}] for snapshot [{}] in policy [{}]: {}", exception.isPresent() ? 
"failure" : "success", - snapshotName, + snapshotId.getName(), policyName, e ); diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java index 9e5265d91dc75..fc4ee7867ed04 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java @@ -49,7 +49,7 @@ public void testToRequest() { p = new SnapshotLifecyclePolicy("id", "name", "0 1 2 3 4 ? 2099", "repo", null, null); request = p.toRequest(TEST_REQUEST_TIMEOUT); - expected.waitForCompletion(true).snapshot(request.snapshot()).repository("repo"); + expected.waitForCompletion(true).snapshot(request.snapshot()).repository("repo").uuid(request.uuid()); assertEquals(expected, request); } diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java index 2c698a0383add..0b273c307cf47 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java @@ -19,7 +19,10 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.scheduler.SchedulerEngine; @@ -27,12 +30,15 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.Strings; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.snapshots.RegisteredPolicySnapshots; import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.snapshots.SnapshotInfoUtils; import org.elasticsearch.snapshots.SnapshotShardFailure; +import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpClient; @@ -48,9 +54,11 @@ import org.elasticsearch.xpack.slm.history.SnapshotHistoryStore; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; @@ -129,7 +137,7 @@ public void testSkipCreatingSnapshotWhenJobDoesNotMatch() { threadPool.shutdownNow(); } - public void testCreateSnapshotOnTrigger() { + public void testCreateSnapshotOnTrigger() throws Exception { final String id = randomAlphaOfLength(4); final SnapshotLifecyclePolicyMetadata slpm = makePolicyMeta(id); final SnapshotLifecycleMetadata meta = new SnapshotLifecycleMetadata( @@ -140,6 +148,12 @@ public void testCreateSnapshotOnTrigger() { final ClusterState state = ClusterState.builder(new ClusterName("test")) 
.metadata(Metadata.builder().putCustom(SnapshotLifecycleMetadata.TYPE, meta).build()) + .nodes( + DiscoveryNodes.builder() + .add(DiscoveryNodeUtils.builder("nodeId").name("nodeId").build()) + .localNodeId("nodeId") + .masterNodeId("nodeId") + ) .build(); final ThreadPool threadPool = new TestThreadPool("test"); @@ -220,11 +234,13 @@ public void testCreateSnapshotOnTrigger() { new SchedulerEngine.Event(SnapshotLifecycleService.getJobId(slpm), System.currentTimeMillis(), System.currentTimeMillis()) ); - assertTrue("snapshot should be triggered once", clientCalled.get()); - assertTrue("history store should be called once", historyStoreCalled.get()); + assertBusy(() -> { + assertTrue("snapshot should be triggered once", clientCalled.get()); + assertTrue("history store should be called once", historyStoreCalled.get()); + }); + } finally { + threadPool.shutdownNow(); } - - threadPool.shutdownNow(); } public void testPartialFailureSnapshot() throws Exception { @@ -238,6 +254,12 @@ public void testPartialFailureSnapshot() throws Exception { final ClusterState state = ClusterState.builder(new ClusterName("test")) .metadata(Metadata.builder().putCustom(SnapshotLifecycleMetadata.TYPE, meta).build()) + .nodes( + DiscoveryNodes.builder() + .add(DiscoveryNodeUtils.builder("nodeId").name("nodeId").build()) + .localNodeId("nodeId") + .masterNodeId("nodeId") + ) .build(); final ThreadPool threadPool = new TestThreadPool("test"); @@ -307,11 +329,252 @@ public void testPartialFailureSnapshot() throws Exception { new SchedulerEngine.Event(SnapshotLifecycleService.getJobId(slpm), System.currentTimeMillis(), System.currentTimeMillis()) ); - assertTrue("snapshot should be triggered once", clientCalled.get()); - assertTrue("history store should be called once", historyStoreCalled.get()); + assertBusy(() -> { + assertTrue("snapshot should be triggered once", clientCalled.get()); + assertTrue("history store should be called once", historyStoreCalled.get()); + }); + } finally { + threadPool.shutdownNow(); } + } - threadPool.shutdownNow(); + public void testDeletedPoliciesHaveRegisteredRemoved() throws Exception { + final String policyId = randomAlphaOfLength(10); + final SnapshotId initiatingSnap = randSnapshotId(); + + final String deletedPolicy = randomAlphaOfLength(10); + final SnapshotId snapForDeletedPolicy = randSnapshotId(); + + SnapshotLifecycleTask.WriteJobStatus writeJobStatus = randomBoolean() + ? SnapshotLifecycleTask.WriteJobStatus.success(policyId, initiatingSnap, randomLong(), randomLong()) + : SnapshotLifecycleTask.WriteJobStatus.failure(policyId, initiatingSnap, randomLong(), new RuntimeException()); + + // deletedPolicy is no longer defined + var definedSlmPolicies = List.of(policyId); + var registeredSnapshots = Map.of(policyId, List.of(initiatingSnap), deletedPolicy, List.of(snapForDeletedPolicy)); + // behavior is same whether initiatingSnap still in progress + var inProgress = Map.of(policyId, randomBoolean() ? 
List.of(initiatingSnap) : List.of()); + ClusterState clusterState = buildClusterState(definedSlmPolicies, registeredSnapshots, inProgress); + + ClusterState newClusterState = writeJobStatus.execute(clusterState); + RegisteredPolicySnapshots newRegisteredPolicySnapshots = newClusterState.metadata().custom(RegisteredPolicySnapshots.TYPE); + + assertEquals(List.of(), newRegisteredPolicySnapshots.getSnapshots()); + } + + public void testOtherDefinedPoliciesUneffected() throws Exception { + final String policyId = randomAlphaOfLength(10); + final SnapshotId initiatingSnap = randSnapshotId(); + + final String otherPolicy = randomAlphaOfLength(10); + final SnapshotId otherSnapRunning = randSnapshotId(); + final SnapshotId otherSnapNotRunning = randSnapshotId(); + + SnapshotLifecycleTask.WriteJobStatus writeJobStatus = randomBoolean() + ? SnapshotLifecycleTask.WriteJobStatus.success(policyId, initiatingSnap, randomLong(), randomLong()) + : SnapshotLifecycleTask.WriteJobStatus.failure(policyId, initiatingSnap, randomLong(), new RuntimeException()); + + var definedSlmPolicies = List.of(policyId, otherPolicy); + var registeredSnapshots = Map.of(policyId, List.of(initiatingSnap), otherPolicy, List.of(otherSnapRunning, otherSnapNotRunning)); + var inProgress = Map.of(policyId, List.of(), otherPolicy, List.of(otherSnapRunning)); + ClusterState clusterState = buildClusterState(definedSlmPolicies, registeredSnapshots, inProgress); + + ClusterState newClusterState = writeJobStatus.execute(clusterState); + RegisteredPolicySnapshots newRegisteredPolicySnapshots = newClusterState.metadata().custom(RegisteredPolicySnapshots.TYPE); + + assertEquals(List.of(otherSnapRunning, otherSnapNotRunning), newRegisteredPolicySnapshots.getSnapshotsByPolicy(otherPolicy)); + assertEquals(List.of(), newRegisteredPolicySnapshots.getSnapshotsByPolicy(policyId)); + } + + public void testInitiatingSnapRemovedButStillRunningRemains() throws Exception { + final String policyId = randomAlphaOfLength(10); + final SnapshotId initiatingSnap = randSnapshotId(); + + SnapshotLifecycleTask.WriteJobStatus writeJobStatus = randomBoolean() + ? SnapshotLifecycleTask.WriteJobStatus.success(policyId, initiatingSnap, randomLong(), randomLong()) + : SnapshotLifecycleTask.WriteJobStatus.failure(policyId, initiatingSnap, randomLong(), new RuntimeException()); + + final SnapshotId stillRunning = randSnapshotId(); + + var definedSlmPolicies = List.of(policyId); + var registeredSnapshots = Map.of(policyId, List.of(stillRunning, initiatingSnap)); + // behavior is same whether initiatingSnap still in progress + var inProgress = Map.of(policyId, randomBoolean() ? 
List.of(stillRunning, initiatingSnap) : List.of(stillRunning)); + ClusterState clusterState = buildClusterState(definedSlmPolicies, registeredSnapshots, inProgress); + + ClusterState newClusterState = writeJobStatus.execute(clusterState); + RegisteredPolicySnapshots newRegisteredPolicySnapshots = newClusterState.metadata().custom(RegisteredPolicySnapshots.TYPE); + + assertEquals(List.of(stillRunning), newRegisteredPolicySnapshots.getSnapshotsByPolicy(policyId)); + } + + public void testInferFailureInitiatedBySuccess() throws Exception { + final String policyId = randomAlphaOfLength(10); + final SnapshotId initiatingSnapshot = randSnapshotId(); + final SnapshotId previousFailedSnapshot = randSnapshotId(); + // currently running snapshots + final SnapshotId stillRunning = randSnapshotId(); + + var definedSlmPolicies = List.of(policyId); + var registeredSnapshots = Map.of(policyId, List.of(stillRunning, previousFailedSnapshot)); + var inProgress = Map.of(policyId, List.of(stillRunning)); + ClusterState clusterState = buildClusterState(definedSlmPolicies, registeredSnapshots, inProgress); + + var writeJobTask = SnapshotLifecycleTask.WriteJobStatus.success(policyId, initiatingSnapshot, randomLong(), randomLong()); + + ClusterState newClusterState = writeJobTask.execute(clusterState); + + // previous failure is now recorded in stats and metadata + SnapshotLifecycleMetadata newSlmMetadata = newClusterState.metadata().custom(SnapshotLifecycleMetadata.TYPE); + SnapshotLifecycleStats newStats = newSlmMetadata.getStats(); + SnapshotLifecycleStats.SnapshotPolicyStats snapshotPolicyStats = newStats.getMetrics().get(policyId); + assertEquals(1, snapshotPolicyStats.getSnapshotFailedCount()); + assertEquals(1, snapshotPolicyStats.getSnapshotTakenCount()); + + SnapshotLifecyclePolicyMetadata newSlmPolicyMetadata = newSlmMetadata.getSnapshotConfigurations().get(policyId); + assertEquals(previousFailedSnapshot.getName(), newSlmPolicyMetadata.getLastFailure().getSnapshotName()); + assertEquals(initiatingSnapshot.getName(), newSlmPolicyMetadata.getLastSuccess().getSnapshotName()); + assertEquals(0, newSlmPolicyMetadata.getInvocationsSinceLastSuccess()); + + // failed snapshot no longer in registeredSnapshot set + RegisteredPolicySnapshots newRegisteredPolicySnapshots = newClusterState.metadata().custom(RegisteredPolicySnapshots.TYPE); + List newRegisteredSnapIds = newRegisteredPolicySnapshots.getSnapshotsByPolicy(policyId); + assertEquals(List.of(stillRunning), newRegisteredSnapIds); + } + + public void testInferFailureInitiatedByFailure() throws Exception { + final String policyId = randomAlphaOfLength(10); + final SnapshotId initiatingSnapshot = randSnapshotId(); + final SnapshotId previousFailedSnapshot = randSnapshotId(); + final SnapshotId stillRunning = randSnapshotId(); + + var definedSlmPolicies = List.of(policyId); + var registeredSnapshots = Map.of(policyId, List.of(stillRunning, previousFailedSnapshot)); + var inProgress = Map.of(policyId, List.of(stillRunning)); + ClusterState clusterState = buildClusterState(definedSlmPolicies, registeredSnapshots, inProgress); + + var writeJobTask = SnapshotLifecycleTask.WriteJobStatus.failure(policyId, initiatingSnapshot, randomLong(), new RuntimeException()); + + ClusterState newClusterState = writeJobTask.execute(clusterState); + + // previous failure is now recorded in stats and metadata + SnapshotLifecycleMetadata newSlmMetadata = newClusterState.metadata().custom(SnapshotLifecycleMetadata.TYPE); + SnapshotLifecycleStats newStats = newSlmMetadata.getStats(); 
+ SnapshotLifecycleStats.SnapshotPolicyStats snapshotPolicyStats = newStats.getMetrics().get(policyId); + assertEquals(2, snapshotPolicyStats.getSnapshotFailedCount()); + assertEquals(0, snapshotPolicyStats.getSnapshotTakenCount()); + + SnapshotLifecyclePolicyMetadata newSlmPolicyMetadata = newSlmMetadata.getSnapshotConfigurations().get(policyId); + assertEquals(initiatingSnapshot.getName(), newSlmPolicyMetadata.getLastFailure().getSnapshotName()); + assertNull(newSlmPolicyMetadata.getLastSuccess()); + assertEquals(2, newSlmPolicyMetadata.getInvocationsSinceLastSuccess()); + + // failed snapshot no longer in registeredSnapshot set + RegisteredPolicySnapshots newRegisteredPolicySnapshots = newClusterState.metadata().custom(RegisteredPolicySnapshots.TYPE); + List newRegisteredSnapIds = newRegisteredPolicySnapshots.getSnapshotsByPolicy(policyId); + assertEquals(List.of(stillRunning), newRegisteredSnapIds); + } + + public void testGetCurrentlyRunningSnapshots() { + final SnapshotId snapshot1 = randSnapshotId(); + final SnapshotId snapshot2 = randSnapshotId(); + final SnapshotId snapshot3 = randSnapshotId(); + final SnapshotId snapshot4 = randSnapshotId(); + + final String repo1 = randomAlphaOfLength(10); + final String repo2 = randomAlphaOfLength(10); + + final var snapshotsInProgress = SnapshotsInProgress.EMPTY.withUpdatedEntriesForRepo( + repo1, + List.of( + makeSnapshotInProgress(repo1, "some-policy", snapshot1), + makeSnapshotInProgress(repo1, "some-policy", snapshot2), + makeSnapshotInProgress(repo1, "other-policy", snapshot3) + ) + ).withUpdatedEntriesForRepo(repo2, List.of(makeSnapshotInProgress(repo2, "other-policy", snapshot4))); + + final ClusterState clusterState = ClusterState.builder(new ClusterName("cluster")) + .putCustom(SnapshotsInProgress.TYPE, snapshotsInProgress) + .build(); + + Set currentlyRunning = SnapshotLifecycleTask.currentlyRunningSnapshots(clusterState); + assertEquals(currentlyRunning, Set.of(snapshot1, snapshot2, snapshot3, snapshot4)); + } + + private static SnapshotId randSnapshotId() { + return new SnapshotId(randomAlphaOfLength(10), randomUUID()); + } + + private static ClusterState buildClusterState( + List slmPolicies, + Map> registeredSnaps, + Map> inProgress + ) { + final String repo = randomAlphaOfLength(10); + List inProgressEntries = new ArrayList<>(); + for (String policy : inProgress.keySet()) { + for (SnapshotId snap : inProgress.get(policy)) { + inProgressEntries.add(makeSnapshotInProgress(repo, policy, snap)); + } + } + + final List policySnapshots = new ArrayList<>(); + for (Map.Entry> policySnaps : registeredSnaps.entrySet()) { + for (SnapshotId snapshotId : policySnaps.getValue()) { + policySnapshots.add(new RegisteredPolicySnapshots.PolicySnapshot(policySnaps.getKey(), snapshotId)); + } + } + + final ClusterState clusterState = ClusterState.builder(new ClusterName("cluster")) + .putCustom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY.withUpdatedEntriesForRepo(repo, inProgressEntries)) + .metadata( + Metadata.builder() + .putCustom(SnapshotLifecycleMetadata.TYPE, makeSnapMeta(slmPolicies)) + .putCustom(RegisteredPolicySnapshots.TYPE, new RegisteredPolicySnapshots(policySnapshots)) + ) + .build(); + + return clusterState; + } + + private static SnapshotLifecycleMetadata makeSnapMeta(List policies) { + Map slmMeta = new HashMap<>(); + + for (String policy : policies) { + SnapshotLifecyclePolicyMetadata slmPolicyMeta = SnapshotLifecyclePolicyMetadata.builder() + .setModifiedDate(randomLong()) + .setPolicy(new SnapshotLifecyclePolicy(policy, 
"snap", "", "repo-name", null, null)) + .build(); + slmMeta.put(policy, slmPolicyMeta); + } + + SnapshotLifecycleStats stats = new SnapshotLifecycleStats( + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + new HashMap<>() + ); + return new SnapshotLifecycleMetadata(slmMeta, OperationMode.RUNNING, stats); + } + + private static SnapshotsInProgress.Entry makeSnapshotInProgress(String repo, String policyId, SnapshotId snapshotId) { + final Map metadata = Map.of(SnapshotsService.POLICY_ID_METADATA_FIELD, policyId); + return SnapshotsInProgress.Entry.snapshot( + new Snapshot(repo, snapshotId), + randomBoolean(), + randomBoolean(), + SnapshotsInProgress.State.SUCCESS, + Map.of(), + List.of(), + List.of(), + randomNonNegativeLong(), + randomNonNegativeLong(), + Map.of(), + null, + metadata, + IndexVersion.current() + ); } /** From 11bea77d3ef8c336d1adce5be4286621d791ae73 Mon Sep 17 00:00:00 2001 From: Stef Nestor <26751266+stefnestor@users.noreply.github.com> Date: Fri, 2 Aug 2024 14:43:22 -0600 Subject: [PATCH 25/36] (Doc+) cluster.routing.allocation.enable effects going forward (#111557) * (Doc+) cluster.routing.allocation.enable effects going forward Noting setting e.g. `cluster.routing.allocation.enable: primaries` ([doc](https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-cluster.html#cluster-shard-allocation-settings)) does not de-allocate existing replicas. Instead this setting affects allocations going forward. --------- Co-authored-by: shainaraskas <58563081+shainaraskas@users.noreply.github.com> --- docs/reference/modules/cluster/shards_allocation.asciidoc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/reference/modules/cluster/shards_allocation.asciidoc b/docs/reference/modules/cluster/shards_allocation.asciidoc index dc53837125ee9..dab5d61a792cb 100644 --- a/docs/reference/modules/cluster/shards_allocation.asciidoc +++ b/docs/reference/modules/cluster/shards_allocation.asciidoc @@ -15,7 +15,8 @@ Enable or disable allocation for specific kinds of shards: * `new_primaries` - Allows shard allocation only for primary shards for new indices. * `none` - No shard allocations of any kind are allowed for any indices. -This setting does not affect the recovery of local primary shards when +This setting only affects future allocations, and does not re-allocate or un-allocate currently allocated shards. +It also does not affect the recovery of local primary shards when restarting a node. A restarted node that has a copy of an unassigned primary shard will recover that primary immediately, assuming that its allocation id matches one of the active allocation ids in the cluster state. From ece555ecf2c574340922cd37e742049784045c0d Mon Sep 17 00:00:00 2001 From: Athena Brown Date: Fri, 2 Aug 2024 14:50:22 -0600 Subject: [PATCH 26/36] Improve security-crypto threadpool overflow handling (#111369) Prior to this PR, when the security-crypto threadpool queue overflows and rejects API key hashing submissions, a toxic value (specifically, a future which will never be completed) is added to the API key auth cache. This toxic cache value causes future authentication attempts with that API key to fail by timeout, because they will attempt to wait for the toxic future, until that value is invalidated and removed from the cache. Additionally, this will hold on to memory for each request that waits on the toxic future, even after the request has timed out. 
This PR adds a unit test to replicate this case, and adjusts the code which submits the key hashing task to the security-crypto threadpool to properly handle this point of failure by invalidating the cached future and notifying waiting handlers that the computation has failed. --- docs/changelog/111369.yaml | 5 ++ .../xpack/security/authc/ApiKeyService.java | 13 ++- .../security/authc/ApiKeyServiceTests.java | 90 ++++++++++++++++++- 3 files changed, 106 insertions(+), 2 deletions(-) create mode 100644 docs/changelog/111369.yaml diff --git a/docs/changelog/111369.yaml b/docs/changelog/111369.yaml new file mode 100644 index 0000000000000..1a638abea4e1d --- /dev/null +++ b/docs/changelog/111369.yaml @@ -0,0 +1,5 @@ +pr: 111369 +summary: Improve security-crypto threadpool overflow handling +area: Authentication +type: bug +issues: [] diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java index aaa1841bd2354..d88577f905e96 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java @@ -1315,7 +1315,18 @@ void validateApiKeyCredentials( AuthenticationResult.unsuccessful("invalid credentials for API key [" + credentials.getId() + "]", null) ); } - }, listener::onFailure)); + }, exception -> { + // Crypto threadpool queue is full, invalidate this cache entry and make sure nothing is going to wait on it + logger.warn( + Strings.format( + "rejecting possibly valid API key authentication because the [%s] threadpool is full", + SECURITY_CRYPTO_THREAD_POOL_NAME + ) + ); + apiKeyAuthCache.invalidate(credentials.getId(), listenableCacheEntry); + listenableCacheEntry.onFailure(exception); + listener.onFailure(exception); + })); } } else { verifyKeyAgainstHash(apiKeyDoc.hash, credentials, ActionListener.wrap(verified -> { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java index 1dce6a038638b..f4d75434b92de 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java @@ -146,6 +146,7 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Semaphore; @@ -230,6 +231,9 @@ public class ApiKeyServiceTests extends ESTestCase { "search": [ {"names": ["logs"]} ], "replication": [ {"names": ["archive"]} ] }"""); + + private static final int TEST_THREADPOOL_QUEUE_SIZE = 1000; + private ThreadPool threadPool; private Client client; private SecurityIndexManager securityIndex; @@ -245,7 +249,7 @@ public void createThreadPool() { Settings.EMPTY, SECURITY_CRYPTO_THREAD_POOL_NAME, 1, - 1000, + TEST_THREADPOOL_QUEUE_SIZE, "xpack.security.crypto.thread_pool", EsExecutors.TaskTrackingConfig.DO_NOT_TRACK ) @@ -268,6 +272,90 @@ public void setupMocks() { doAnswer(invocation -> Instant.now()).when(clock).instant(); } + public void testFloodThreadpool() throws Exception { + // We're going to be blocking the 
security-crypto threadpool so we need a new one for the client + ThreadPool clientThreadpool = new TestThreadPool( + this.getTestName(), + new FixedExecutorBuilder( + Settings.EMPTY, + this.getTestName(), + 1, + 100, + "no_settings_used", + EsExecutors.TaskTrackingConfig.DO_NOT_TRACK + ) + ); + try { + when(client.threadPool()).thenReturn(clientThreadpool); + + // setup copied from testAuthenticateWithApiKey + final Settings settings = Settings.builder().put(XPackSettings.API_KEY_SERVICE_ENABLED_SETTING.getKey(), true).build(); + final ApiKeyService service = createApiKeyService(settings); + + final String id = randomAlphaOfLength(12); + final String key = randomAlphaOfLength(16); + + final User user, authUser; + if (randomBoolean()) { + user = new User("hulk", new String[] { "superuser" }, "Bruce Banner", "hulk@test.com", Map.of(), true); + authUser = new User("authenticated_user", "other"); + } else { + user = new User("hulk", new String[] { "superuser" }, "Bruce Banner", "hulk@test.com", Map.of(), true); + authUser = null; + } + final ApiKey.Type type = randomFrom(ApiKey.Type.values()); + final Map metadata = mockKeyDocument(id, key, user, authUser, false, Duration.ofSeconds(3600), null, type); + + // Block the security crypto threadpool + CyclicBarrier barrier = new CyclicBarrier(2); + threadPool.executor(SECURITY_CRYPTO_THREAD_POOL_NAME).execute(() -> safeAwait(barrier)); + // Now fill it up while the one thread is blocked + for (int i = 0; i < TEST_THREADPOOL_QUEUE_SIZE; i++) { + threadPool.executor(SECURITY_CRYPTO_THREAD_POOL_NAME).execute(() -> {}); + } + + // Check that it's full + for (var stat : threadPool.stats().stats()) { + if (stat.name().equals(SECURITY_CRYPTO_THREAD_POOL_NAME)) { + assertThat(stat.queue(), equalTo(TEST_THREADPOOL_QUEUE_SIZE)); + assertThat(stat.rejected(), equalTo(0L)); + } + } + + // now try to auth with an API key + final AuthenticationResult auth = tryAuthenticate(service, id, key, type); + assertThat(auth.getStatus(), is(AuthenticationResult.Status.TERMINATE)); + + // Make sure one was rejected and the queue is still full + for (var stat : threadPool.stats().stats()) { + if (stat.name().equals(SECURITY_CRYPTO_THREAD_POOL_NAME)) { + assertThat(stat.queue(), equalTo(TEST_THREADPOOL_QUEUE_SIZE)); + assertThat(stat.rejected(), equalTo(1L)); + } + } + ListenableFuture cachedValue = service.getApiKeyAuthCache().get(id); + assertThat("since the request was rejected, there should be no cache entry for this key", cachedValue, nullValue()); + + // unblock the threadpool + safeAwait(barrier); + + // wait for the threadpool queue to drain & check that the stats as as expected + flushThreadPoolExecutor(threadPool, SECURITY_CRYPTO_THREAD_POOL_NAME); + for (var stat : threadPool.stats().stats()) { + if (stat.name().equals(SECURITY_CRYPTO_THREAD_POOL_NAME)) { + assertThat(stat.rejected(), equalTo(1L)); + assertThat(stat.queue(), equalTo(0)); + } + } + + // try to authenticate again with the same key - if this hangs, check the future caching + final AuthenticationResult shouldSucceed = tryAuthenticate(service, id, key, type); + assertThat(shouldSucceed.getStatus(), is(AuthenticationResult.Status.SUCCESS)); + } finally { + terminate(clientThreadpool); + } + } + public void testCreateApiKeyUsesBulkIndexAction() throws Exception { final Settings settings = Settings.builder().put(XPackSettings.API_KEY_SERVICE_ENABLED_SETTING.getKey(), true).build(); final ApiKeyService service = createApiKeyService(settings); From 27c80d5e7289359fd6f4573ce9c293f5ddfbf986 Mon Sep 17 
00:00:00 2001 From: James Baiera Date: Fri, 2 Aug 2024 16:58:56 -0400 Subject: [PATCH 27/36] Limit stack trace size on failure store documents (#111106) This PR adds a new function to ExceptionsHelper that constructs a "limited" stack trace. This stack trace eschews module information from the stack frames, compresses all package names to be singular letters, and limits the number of stack frames displayed to a specified maximum --- .../190_failure_store_redirection.yml | 3 +- .../org/elasticsearch/ExceptionsHelper.java | 187 ++++++++++++ .../bulk/FailureStoreDocumentConverter.java | 4 +- .../elasticsearch/ExceptionsHelperTests.java | 272 ++++++++++++++++++ .../FailureStoreDocumentConverterTests.java | 4 +- 5 files changed, 465 insertions(+), 5 deletions(-) diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml index 54ce32eb13207..0b3007021cad8 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml @@ -123,7 +123,7 @@ teardown: - match: { hits.hits.0._source.document.source.foo: 'bar' } - match: { hits.hits.0._source.error.type: 'fail_processor_exception' } - match: { hits.hits.0._source.error.message: 'error_message' } - - contains: { hits.hits.0._source.error.stack_trace: 'org.elasticsearch.ingest.common.FailProcessorException: error_message' } + - contains: { hits.hits.0._source.error.stack_trace: 'error_message' } - length: { hits.hits.0._source.error.pipeline_trace: 2 } - match: { hits.hits.0._source.error.pipeline_trace.0: 'parent_failing_pipeline' } - match: { hits.hits.0._source.error.pipeline_trace.1: 'failing_pipeline' } @@ -207,7 +207,6 @@ teardown: - match: { hits.hits.0._source.error.type: 'document_parsing_exception' } - contains: { hits.hits.0._source.error.message: "failed to parse field [count] of type [long] in document with id " } - contains: { hits.hits.0._source.error.message: "Preview of field's value: 'invalid value'" } - - contains: { hits.hits.0._source.error.stack_trace: "org.elasticsearch.index.mapper.DocumentParsingException: " } - contains: { hits.hits.0._source.error.stack_trace: "failed to parse field [count] of type [long] in document with id" } - contains: { hits.hits.0._source.error.stack_trace: "Preview of field's value: 'invalid value'" } diff --git a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java index b67b59aeee076..3e109fb1600b9 100644 --- a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java +++ b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java @@ -97,6 +97,193 @@ public static String stackTrace(Throwable e) { return stackTraceStringWriter.toString(); } + /** + * Constructs a limited and compressed stack trace string. Each exception printed as part of the full stack trace will have its printed + * stack frames capped at the given trace depth. Stack traces that are longer than the given trace depth will summarize the count of the + * remaining frames at the end of the trace. Each stack frame omits the module information and limits the package names to single + * characters per part. + *
+     * <p>
+     * An example result when using a trace depth of 2 and one nested cause:
+     * <pre><code>
+     * o.e.s.GenericException: some generic exception!
+     *   at o.e.s.SomeClass.method(SomeClass.java:100)
+     *   at o.e.s.SomeOtherClass.earlierMethod(SomeOtherClass.java:24)
+     *   ... 5 more
+     * Caused by: o.e.s.GenericException: some other generic exception!
+     *   at o.e.s.SomeClass.method(SomeClass.java:115)
+     *   at o.e.s.SomeOtherClass.earlierMethod(SomeOtherClass.java:16)
+     *   ... 12 more
+     * </code></pre>
+ * + * @param e Throwable object to construct a printed stack trace for + * @param traceDepth The maximum number of stack trace elements to display per exception referenced + * @return A string containing a limited and compressed stack trace. + */ + public static String limitedStackTrace(Throwable e, int traceDepth) { + assert traceDepth >= 0 : "Cannot print stacktraces with negative trace depths"; + StringWriter stackTraceStringWriter = new StringWriter(); + PrintWriter printWriter = new PrintWriter(stackTraceStringWriter); + printLimitedStackTrace(e, printWriter, traceDepth); + return stackTraceStringWriter.toString(); + } + + /** Caption for labeling causative exception stack traces */ + private static final String CAUSE_CAPTION = "Caused by: "; + /** Caption for labeling suppressed exception stack traces */ + private static final String SUPPRESSED_CAPTION = "Suppressed: "; + + private static void printLimitedStackTrace(Throwable e, PrintWriter s, int maxLines) { + // Guard against malicious overrides of Throwable.equals by + // using a Set with identity equality semantics. + Set dejaVu = Collections.newSetFromMap(new IdentityHashMap<>()); + dejaVu.add(e); + + // Print our stack trace + s.println(compressExceptionMessage(e)); + StackTraceElement[] trace = e.getStackTrace(); + int linesPrinted = 0; + for (StackTraceElement traceElement : trace) { + if (linesPrinted >= maxLines) { + break; + } else { + s.println(compressStackTraceElement(new StringBuilder("\tat "), traceElement)); + linesPrinted++; + } + } + if (trace.length > linesPrinted) { + s.println("\t... " + (trace.length - linesPrinted) + " more"); + } + + // Print suppressed exceptions, if any + for (Throwable se : e.getSuppressed()) { + limitAndPrintEnclosedStackTrace(se, s, trace, SUPPRESSED_CAPTION, "\t", maxLines, dejaVu); + } + + // Print cause, if any + Throwable ourCause = e.getCause(); + if (ourCause != null) { + limitAndPrintEnclosedStackTrace(ourCause, s, trace, CAUSE_CAPTION, "", maxLines, dejaVu); + } + } + + private static void limitAndPrintEnclosedStackTrace( + Throwable e, + PrintWriter s, + StackTraceElement[] enclosingTrace, + String caption, + String prefix, + int maxLines, + Set dejaVu + ) { + if (dejaVu.contains(e)) { + s.println(prefix + caption + "[CIRCULAR REFERENCE: " + compressExceptionMessage(e) + "]"); + } else { + dejaVu.add(e); + // Compute number of frames in common between this and enclosing trace + StackTraceElement[] trace = e.getStackTrace(); + int m = trace.length - 1; + int n = enclosingTrace.length - 1; + while (m >= 0 && n >= 0 && trace[m].equals(enclosingTrace[n])) { + m--; + n--; + } + int framesInCommon = trace.length - 1 - m; + + // Instead of breaking out of the print loop below when it reaches the maximum + // print lines, we simply cap how many frames we plan on printing here. + int linesToPrint = m + 1; + if (linesToPrint > maxLines) { + // The print loop below is "<=" based instead of "<", so subtract + // one from the max lines to convert a count value to an array index + // value and avoid an off by one error. + m = maxLines - 1; + framesInCommon = trace.length - 1 - m; + } + + // Print our stack trace + s.println(prefix + caption + compressExceptionMessage(e)); + for (int i = 0; i <= m; i++) { + s.println(compressStackTraceElement(new StringBuilder(prefix).append("\tat "), trace[i])); + } + if (framesInCommon != 0) { + s.println(prefix + "\t... 
" + framesInCommon + " more"); + } + + // Print suppressed exceptions, if any + for (Throwable se : e.getSuppressed()) { + limitAndPrintEnclosedStackTrace(se, s, trace, SUPPRESSED_CAPTION, prefix + "\t", maxLines, dejaVu); + } + + // Print cause, if any + Throwable ourCause = e.getCause(); + if (ourCause != null) { + limitAndPrintEnclosedStackTrace(ourCause, s, trace, CAUSE_CAPTION, prefix, maxLines, dejaVu); + } + } + } + + private static String compressExceptionMessage(Throwable e) { + StringBuilder msg = new StringBuilder(); + compressPackages(msg, e.getClass().getName()); + String message = e.getLocalizedMessage(); + if (message != null) { + msg.append(": ").append(message); + } + return msg.toString(); + } + + private static StringBuilder compressStackTraceElement(StringBuilder s, final StackTraceElement stackTraceElement) { + String declaringClass = stackTraceElement.getClassName(); + compressPackages(s, declaringClass); + + String methodName = stackTraceElement.getMethodName(); + s.append(".").append(methodName).append("("); + + if (stackTraceElement.isNativeMethod()) { + s.append("Native Method)"); + } else { + String fileName = stackTraceElement.getFileName(); + int lineNumber = stackTraceElement.getLineNumber(); + if (fileName != null && lineNumber >= 0) { + s.append(fileName).append(":").append(lineNumber).append(")"); + } else if (fileName != null) { + s.append(fileName).append(")"); + } else { + s.append("Unknown Source)"); + } + } + return s; + } + + // Visible for testing + static void compressPackages(StringBuilder s, String className) { + assert s != null : "s cannot be null"; + assert className != null : "className cannot be null"; + int finalDot = className.lastIndexOf('.'); + if (finalDot < 0) { + s.append(className); + return; + } + int lastPackageName = className.lastIndexOf('.', finalDot - 1); + if (lastPackageName < 0) { + if (finalDot >= 1) { + s.append(className.charAt(0)).append('.'); + } + s.append(className.substring(finalDot + 1)); + return; + } + boolean firstChar = true; + char[] charArray = className.toCharArray(); + for (int idx = 0; idx <= lastPackageName + 1; idx++) { + char c = charArray[idx]; + if (firstChar && '.' != c) { + s.append(c).append('.'); + } + firstChar = '.' 
== c; + } + s.append(className.substring(finalDot + 1)); + } + public static String formatStackTrace(final StackTraceElement[] stackTrace) { return Arrays.stream(stackTrace).skip(1).map(e -> "\tat " + e).collect(Collectors.joining("\n")); } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/FailureStoreDocumentConverter.java b/server/src/main/java/org/elasticsearch/action/bulk/FailureStoreDocumentConverter.java index cc9f9b8ee1ce7..962e844529125 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/FailureStoreDocumentConverter.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/FailureStoreDocumentConverter.java @@ -32,6 +32,8 @@ */ public class FailureStoreDocumentConverter { + private static final int STACKTRACE_PRINT_DEPTH = 2; + private static final Set INGEST_EXCEPTION_HEADERS = Set.of( PIPELINE_ORIGIN_EXCEPTION_HEADER, PROCESSOR_TAG_EXCEPTION_HEADER, @@ -109,7 +111,7 @@ private static XContentBuilder createSource( { builder.field("type", ElasticsearchException.getExceptionName(unwrapped)); builder.field("message", unwrapped.getMessage()); - builder.field("stack_trace", ExceptionsHelper.stackTrace(unwrapped)); + builder.field("stack_trace", ExceptionsHelper.limitedStackTrace(unwrapped, STACKTRACE_PRINT_DEPTH)); // Try to find the IngestProcessorException somewhere in the stack trace. Since IngestProcessorException is package-private, // we can't instantiate it in tests, so we'll have to check for the headers directly. var ingestException = ExceptionsHelper.unwrapCausesAndSuppressed( diff --git a/server/src/test/java/org/elasticsearch/ExceptionsHelperTests.java b/server/src/test/java/org/elasticsearch/ExceptionsHelperTests.java index 744c0fafedb46..8f3d84b5e90da 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionsHelperTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionsHelperTests.java @@ -221,4 +221,276 @@ public void testCauseCycle() { ExceptionsHelper.unwrap(e1, IOException.class); ExceptionsHelper.unwrapCorruption(e1); } + + public void testLimitedStackTrace() { + // A normal exception is thrown several stack frames down + int maxTraces = between(0, 5); + RuntimeException exception = recurseAndCatchRuntime(randomIntBetween(10, 15), ExceptionsHelperTests::throwRegularException); + String limitedTrace = ExceptionsHelper.limitedStackTrace(exception, maxTraces); + int expectedLength = 1 + maxTraces + 1; // Exception message, traces, then the count of remaining traces + assertThat(limitedTrace.split("\n").length, equalTo(expectedLength)); + } + + public void testLimitedStackTraceShortened() { + // An exception has a smaller trace than is requested + // Set max traces very, very high, since this test is sensitive to the number of method calls on the thread's stack. 
+ int maxTraces = 5000; + RuntimeException exception = new RuntimeException("Regular Exception"); + String limitedTrace = ExceptionsHelper.limitedStackTrace(exception, maxTraces); + String fullTrace = ExceptionsHelper.stackTrace(exception); + int expectedLength = fullTrace.split("\n").length; // The resulting line count should not be reduced + assertThat(limitedTrace.split("\n").length, equalTo(expectedLength)); + } + + public void testLimitedStackTraceWrappedExceptions() { + // An exception is thrown and is then wrapped several stack frames later + int maxTraces = between(0, 5); + RuntimeException exception = recurseAndCatchRuntime( + randomIntBetween(10, 15), + () -> throwExceptionCausedBy(recurseAndCatchRuntime(randomIntBetween(10, 15), ExceptionsHelperTests::throwRegularException)) + ); + String limitedTrace = ExceptionsHelper.limitedStackTrace(exception, maxTraces); + // (1) Exception message, (n) traces, (1) remaining traces, (1) caused by, (n) caused by traces, (1) remaining traces + int expectedLength = 4 + (2 * maxTraces); + assertThat(limitedTrace.split("\n").length, equalTo(expectedLength)); + } + + public void testLimitedStackTraceSuppressingAnException() { + // A normal exception is thrown several stack frames down and then suppresses a new exception on the way back up + int maxTraces = between(0, 5); + RuntimeException exception = recurseAndCatchRuntime(randomIntBetween(10, 15), () -> { + RuntimeException original = recurseAndCatchRuntime(randomIntBetween(10, 15), ExceptionsHelperTests::throwRegularException); + recurseUntil(randomIntBetween(10, 15), () -> suppressNewExceptionUnder(original)); + }); + String limitedTrace = ExceptionsHelper.limitedStackTrace(exception, maxTraces); + // (1) Exception message, (n) traces, (1) remaining traces, then + // (1) suppressed, (n) suppressed by traces, (1) remaining lines + int expectedLength = 4 + (2 * maxTraces); + assertThat(limitedTrace, limitedTrace.split("\n").length, equalTo(expectedLength)); + } + + public void testLimitedStackTraceSuppressedByAnException() { + // A normal exception is thrown several stack frames down and then gets suppressed on the way back up by a new exception + int maxTraces = between(0, 5); + RuntimeException exception = recurseAndCatchRuntime(randomIntBetween(10, 15), () -> { + RuntimeException original = recurseAndCatchRuntime(randomIntBetween(10, 15), ExceptionsHelperTests::throwRegularException); + recurseUntil(randomIntBetween(10, 15), () -> throwNewExceptionThatSuppresses(original)); + }); + String limitedTrace = ExceptionsHelper.limitedStackTrace(exception, maxTraces); + // (1) Exception message, (n) traces, (1) remaining traces, then + // (1) suppressed original exception, (n) suppressed traces, (1) remaining traces + int expectedLength = 4 + (2 * maxTraces); + assertThat(limitedTrace, limitedTrace.split("\n").length, equalTo(expectedLength)); + } + + public void testLimitedStackTraceSuppressingAnExceptionWithACause() { + // A normal exception is thrown several stack frames down. On the way back up, a new exception with a nested cause is + // suppressed by it. 
+ int maxTraces = between(0, 5); + RuntimeException exception = recurseAndCatchRuntime(randomIntBetween(10, 15), () -> { + RuntimeException original = recurseAndCatchRuntime(randomIntBetween(10, 15), ExceptionsHelperTests::throwRegularException); + RuntimeException causedBy = recurseAndCatchRuntime(randomIntBetween(10, 15), ExceptionsHelperTests::throwRegularException); + recurseUntil(randomIntBetween(10, 15), () -> suppressNewExceptionWithCauseUnder(original, causedBy)); + }); + String limitedTrace = ExceptionsHelper.limitedStackTrace(exception, maxTraces); + // (1) Exception message, (n) traces, (1) remaining traces, then + // (1) suppressed exception, (n) suppressed traces, (1) remaining traces + // (1) suppressed caused by exception, (n) traces, (1) remaining traces + int expectedLength = 6 + (3 * maxTraces); + assertThat(limitedTrace, limitedTrace.split("\n").length, equalTo(expectedLength)); + } + + public void testLimitedStackTraceSuppressedByAnExceptionWithACause() { + // A normal exception is thrown several stack frames down. On the way back up, a new exception with a nested cause + // suppresses it. + int maxTraces = between(0, 5); + RuntimeException exception = recurseAndCatchRuntime(randomIntBetween(10, 15), () -> { + RuntimeException original = recurseAndCatchRuntime(randomIntBetween(10, 15), ExceptionsHelperTests::throwRegularException); + RuntimeException causedBy = recurseAndCatchRuntime(randomIntBetween(10, 15), ExceptionsHelperTests::throwRegularException); + recurseUntil(randomIntBetween(10, 15), () -> throwNewExceptionWithCauseThatSuppresses(original, causedBy)); + }); + String limitedTrace = ExceptionsHelper.limitedStackTrace(exception, maxTraces); + // (1) Exception message, (n) traces, (1) remaining traces, then + // (1) suppressed original exception, (n) traces, (1) remaining traces, then + // (1) caused by exception, (n) traces, (1) remaining traces + int expectedLength = 6 + (3 * maxTraces); + assertThat(limitedTrace, limitedTrace.split("\n").length, equalTo(expectedLength)); + } + + public void testLimitedStackTraceWrappedAndSuppressingAWrappedException() { + // A normal exception is thrown several stack frames down. It gets wrapped on the way back up. + // Some "recovery" code runs and a new exception is thrown. It also gets wrapped on the way back up. + // The first chain of exceptions suppresses the second. 
+ int maxTraces = between(0, 5); + RuntimeException exception = recurseAndCatchRuntime(randomIntBetween(10, 15), () -> { + RuntimeException original = recurseAndCatchRuntime( + randomIntBetween(10, 15), + () -> throwExceptionCausedBy(recurseAndCatchRuntime(randomIntBetween(10, 15), ExceptionsHelperTests::throwRegularException)) + ); + RuntimeException causedBy = recurseAndCatchRuntime( + randomIntBetween(10, 15), + () -> throwExceptionCausedBy(recurseAndCatchRuntime(randomIntBetween(10, 15), ExceptionsHelperTests::throwRegularException)) + ); + recurseUntil(randomIntBetween(10, 15), () -> suppressNewExceptionWithCauseUnder(original, causedBy)); + }); + String limitedTrace = ExceptionsHelper.limitedStackTrace(exception, maxTraces); + // (1) wrapped exception message, (n) traces, (1) remaining traces, then + // (1) suppressed exception, (n) suppressed traces, (1) remaining traces, then + // (1) wrapped exception under suppressed exception, (n) traces, (1) remaining traces, then + // (1) root cause of suppressed exception chain, (n) traces, (1) remaining traces, then + // (1) root cause of the suppressing exception chain, (n) traces, (1) remaining traces + int expectedLength = 10 + (5 * maxTraces); + assertThat(limitedTrace, limitedTrace.split("\n").length, equalTo(expectedLength)); + } + + public void testLimitedStackTraceWrappedAndSuppressedByAWrappedException() { + // A normal exception is thrown several stack frames down. It gets wrapped on the way back up. + // Some "recovery" code runs and a new exception is thrown. It also gets wrapped on the way back up. + // The first chain of exceptions suppresses the second. + int maxTraces = between(0, 5); + RuntimeException exception = recurseAndCatchRuntime(randomIntBetween(10, 15), () -> { + RuntimeException original = recurseAndCatchRuntime( + randomIntBetween(10, 15), + () -> throwExceptionCausedBy(recurseAndCatchRuntime(randomIntBetween(10, 15), ExceptionsHelperTests::throwRegularException)) + ); + RuntimeException causedBy = recurseAndCatchRuntime( + randomIntBetween(10, 15), + () -> throwExceptionCausedBy(recurseAndCatchRuntime(randomIntBetween(10, 15), ExceptionsHelperTests::throwRegularException)) + ); + throwNewExceptionWithCauseThatSuppresses(original, causedBy); + }); + String limitedTrace = ExceptionsHelper.limitedStackTrace(exception, maxTraces); + // (1) wrapped exception message, (n) traces, (1) remaining traces, then + // (1) suppressed wrapped exception, (n) traces, (1) remaining traces, then + // (1) root cause of suppressed exception chain, (n) traces, (1) remaining traces, then + // (1) wrapped exception, (n) traces, (1) remaining traces, then + // (1) root cause of exception chain, (n) traces, (1) remaining traces + int expectedLength = 10 + (5 * maxTraces); + assertThat(limitedTrace, limitedTrace.split("\n").length, equalTo(expectedLength)); + } + + public void testLimitedStackTraceNestedSuppression() { + // The original exception is thrown and is then repeatedly suppressed by new exceptions + int maxTraces = between(0, 5); + RuntimeException exception = recurseAndCatchRuntime( + randomIntBetween(10, 15), + () -> throwNewExceptionWithCauseThatSuppresses( + recurseAndCatchRuntime( + randomIntBetween(10, 15), + () -> throwNewExceptionWithCauseThatSuppresses( + recurseAndCatchRuntime(randomIntBetween(10, 15), ExceptionsHelperTests::throwRegularException), + recurseAndCatchRuntime(randomIntBetween(10, 15), ExceptionsHelperTests::throwRegularException) + ) + ), + recurseAndCatchRuntime(randomIntBetween(10, 15), 
ExceptionsHelperTests::throwRegularException) + ) + ); + String limitedTrace = ExceptionsHelper.limitedStackTrace(exception, maxTraces); + // (1) first suppressing exception message, (n) traces, (1) remaining traces, then + // (1) second suppressing exception message, (n) traces, (1) remaining traces, then + // (1) suppressed original exception message, (n) traces, (1) remaining traces, then + // (1) cause of second suppressed exception, (n) traces, (1) remaining traces, then + // (1) cause of first suppressed exception, (n) traces, (1) remaining traces, then + int expectedLength = 10 + (5 * maxTraces); + assertThat(limitedTrace, limitedTrace.split("\n").length, equalTo(expectedLength)); + } + + public void testLimitedStackTraceCircularCause() { + // An exception is thrown and then suppresses itself + int maxTraces = between(0, 5); + RuntimeException exception1 = recurseAndCatchRuntime(randomIntBetween(10, 15), ExceptionsHelperTests::throwRegularException); + RuntimeException exception2 = recurseAndCatchRuntime(randomIntBetween(10, 15), ExceptionsHelperTests::throwRegularException); + exception1.initCause(exception2); + exception2.initCause(exception1); + String limitedTrace = ExceptionsHelper.limitedStackTrace(exception1, maxTraces); + // (1) first exception message, (n) traces, (1) remaining traces, then + // (1) caused by second exception, (n) traces, (1) remaining traces, then + // (1) caused by first exception message again, but no further traces + int expectedLength = 5 + (2 * maxTraces); + assertThat(limitedTrace, limitedTrace.split("\n").length, equalTo(expectedLength)); + } + + public void testLimitedStackTraceCircularSuppression() { + // An exception is thrown and then suppresses itself + int maxTraces = between(0, 5); + RuntimeException exception1 = recurseAndCatchRuntime(randomIntBetween(10, 15), ExceptionsHelperTests::throwRegularException); + RuntimeException exception2 = recurseAndCatchRuntime(randomIntBetween(10, 15), ExceptionsHelperTests::throwRegularException); + exception1.addSuppressed(exception2); + exception2.addSuppressed(exception1); + String limitedTrace = ExceptionsHelper.limitedStackTrace(exception1, maxTraces); + // (1) first exception message, (n) traces, (1) remaining traces, then + // (1) suppressed second exception, (n) traces, (1) remaining traces, then + // (1) suppressed first exception message again, but no further traces + int expectedLength = 5 + (2 * maxTraces); + assertThat(limitedTrace, limitedTrace.split("\n").length, equalTo(expectedLength)); + } + + private static void throwRegularException() { + throw new RuntimeException("Regular Exception"); + } + + private static void throwExceptionCausedBy(RuntimeException causedBy) { + throw new RuntimeException("Wrapping Exception", causedBy); + } + + private static void suppressNewExceptionUnder(RuntimeException suppressor) { + suppressor.addSuppressed(new RuntimeException("Suppressed Exception")); + throw suppressor; + } + + private static void throwNewExceptionThatSuppresses(RuntimeException suppressed) { + RuntimeException priority = new RuntimeException("Priority Exception"); + priority.addSuppressed(suppressed); + throw priority; + } + + private static void suppressNewExceptionWithCauseUnder(RuntimeException suppressor, RuntimeException suppressedCause) { + suppressor.addSuppressed(new RuntimeException("Suppressed Exception", suppressedCause)); + throw suppressor; + } + + private static void throwNewExceptionWithCauseThatSuppresses(RuntimeException suppressed, RuntimeException suppressorCause) { + 
RuntimeException priority = new RuntimeException("Priority Exception", suppressorCause); + priority.addSuppressed(suppressed); + throw priority; + }; + + private static RuntimeException recurseAndCatchRuntime(int depth, Runnable op) { + return expectThrows(RuntimeException.class, () -> doRecurse(depth, 0, op)); + } + + private static void recurseUntil(int depth, Runnable op) { + doRecurse(depth, 0, op); + } + + private static void doRecurse(int depth, int current, Runnable op) { + if (depth == current) { + op.run(); + } else { + doRecurse(depth, current + 1, op); + } + } + + public void testCompressStackTraceElement() { + assertThat(compressPackages(""), equalTo("")); + assertThat(compressPackages("."), equalTo("")); + assertThat(compressPackages("ClassName"), equalTo("ClassName")); + assertThat(compressPackages("alfa.ClassName"), equalTo("a.ClassName")); + assertThat(compressPackages("alfa.bravo.ClassName"), equalTo("a.b.ClassName")); + assertThat(compressPackages(".ClassName"), equalTo("ClassName")); + assertThat(compressPackages(".alfa.ClassName"), equalTo("a.ClassName")); + assertThat(compressPackages(".alfa.bravo.ClassName"), equalTo("a.b.ClassName")); + assertThat(compressPackages("...alfa.....ClassName"), equalTo("a.ClassName")); + assertThat(compressPackages("...alfa....bravo.ClassName"), equalTo("a.b.ClassName")); + assertThat(compressPackages("...alfa....bravo.charliepackagenameisreallyreallylong.ClassName"), equalTo("a.b.c.ClassName")); + assertThat(compressPackages("alfa.bravo.charlie.OuterClassName.InnerClassName"), equalTo("a.b.c.O.InnerClassName")); + + expectThrows(AssertionError.class, () -> compressPackages(null)); + } + + private static String compressPackages(String className) { + StringBuilder s = new StringBuilder(); + ExceptionsHelper.compressPackages(s, className); + return s.toString(); + } } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/FailureStoreDocumentConverterTests.java b/server/src/test/java/org/elasticsearch/action/bulk/FailureStoreDocumentConverterTests.java index 85cedff9145be..c03d5e16b287b 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/FailureStoreDocumentConverterTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/FailureStoreDocumentConverterTests.java @@ -90,11 +90,11 @@ public void testFailureStoreDocumentConversion() throws Exception { assertThat(ObjectPath.eval("error.message", convertedRequest.sourceAsMap()), is(equalTo("Test exception please ignore"))); assertThat( ObjectPath.eval("error.stack_trace", convertedRequest.sourceAsMap()), - startsWith("org.elasticsearch.ElasticsearchException: Test exception please ignore") + startsWith("o.e.ElasticsearchException: Test exception please ignore") ); assertThat( ObjectPath.eval("error.stack_trace", convertedRequest.sourceAsMap()), - containsString("at org.elasticsearch.action.bulk.FailureStoreDocumentConverterTests.testFailureStoreDocumentConversion") + containsString("at o.e.a.b.FailureStoreDocumentConverterTests.testFailureStoreDocumentConversion") ); assertThat( ObjectPath.eval("error.pipeline_trace", convertedRequest.sourceAsMap()), From ac32bd9f8c39823011ffc0b0f153d93d80595068 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine Date: Sat, 3 Aug 2024 06:12:21 +0000 Subject: [PATCH 28/36] [Automated] Update Lucene snapshot to 9.12.0-snapshot-65be22a6221 --- build-tools-internal/version.properties | 2 +- gradle/verification-metadata.xml | 144 ++++++++++++------------ 2 files changed, 73 insertions(+), 73 deletions(-) diff --git 
a/build-tools-internal/version.properties b/build-tools-internal/version.properties
index b3152dbcb4b51..06ff73a3e75ce 100644
--- a/build-tools-internal/version.properties
+++ b/build-tools-internal/version.properties
@@ -1,5 +1,5 @@
 elasticsearch = 8.16.0
-lucene = 9.12.0-snapshot-e83c1d4234c
+lucene = 9.12.0-snapshot-65be22a6221

 bundled_jdk_vendor = openjdk
 bundled_jdk = 22.0.1+8@c7ec1332f7bb44aeba2eb341ae18aca4
diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml
index c920316c460f3..30080f85f2424 100644
--- a/gradle/verification-metadata.xml
+++ b/gradle/verification-metadata.xml
@@ -2809,124 +2809,124 @@
[hunk body omitted: the replaced and added <sha256> checksum entries for the Lucene snapshot jars were lost in extraction, leaving only bare +/- markers]

From 8bca8277507c1cb37934e0ba1a1f0fc0a4f783b0 Mon Sep 17 00:00:00 2001
From: elasticsearchmachine
Date: Sun, 4 Aug 2024 06:11:34 +0000
Subject: [PATCH 29/36] [Automated] Update Lucene snapshot to 9.12.0-snapshot-65be22a6221

---
 gradle/verification-metadata.xml | 48 ++++++++++++++++----------------
 1 file changed, 24 insertions(+), 24 deletions(-)

diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml
index 30080f85f2424..4389c7751575e 100644
--- a/gradle/verification-metadata.xml
+++ b/gradle/verification-metadata.xml
@@ -2811,122 +2811,122 @@
[hunk body omitted: <sha256> checksum updates lost in extraction]

From e58678da4eee1c3f844c80e6ac0a4df8aadacca5 Mon Sep 17 00:00:00 2001
From: Yang Wang
Date: Mon, 5 Aug 2024 09:40:27 +1000
Subject: [PATCH 30/36] Use higher precision time so that 0 can represent missing data (#111554)

The time computation uses 0 to represent a missing value. In #111502, we lowered the precision from micros to ms.
For requests that completed in under 1 ms, their time metrics are now considered missing. This PR fixes it by
raising the precision to nanoseconds, which is the native resolution of the S3 time metric, and lowering it to ms
only when recording the metric.
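In other words: sum durations at the clock's native nanosecond resolution and convert to milliseconds only at the
instant the histogram is recorded, so a genuinely measured sub-millisecond request can never be confused with the
0 "no data" sentinel. A minimal, self-contained sketch of the idea follows; the class name, span representation,
and printing are illustrative stand-ins, not the actual S3BlobStore/APM code in the diff below.

    import java.util.List;
    import java.util.concurrent.TimeUnit;

    final class HttpRequestTiming {
        // Spans as {startNanos, endNanos}; a total of 0 is reserved to mean "no data".
        static long totalTimeInNanos(List<long[]> spans) {
            long total = 0;
            for (long[] span : spans) {
                total += span[1] - span[0]; // keep native nanosecond resolution
            }
            return total;
        }

        static void maybeRecord(List<long[]> spans) {
            long totalNanos = totalTimeInNanos(spans);
            if (totalNanos == 0) {
                // genuinely missing: nothing was measured at all
                System.err.println("expected request time to be tracked but found none");
            } else {
                // round only here; a 0.4 ms request records as 0 ms but is no longer
                // mistaken for missing data by the sentinel check above
                System.out.println("httpRequestTime=" + TimeUnit.NANOSECONDS.toMillis(totalNanos) + " ms");
            }
        }
    }

The design point is simply that the sentinel check happens before any lossy rounding.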
Resolves: #111549 Resolves: #111550 --- .../repositories/s3/S3BlobStore.java | 25 +++++++++++-------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java index fbf4767bd3e99..03605d50750f0 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java @@ -224,11 +224,13 @@ private void maybeRecordHttpRequestTime(Request request) { return; } - final long totalTimeInMillis = getTotalTimeInMillis(requestTimesIncludingRetries); - if (totalTimeInMillis == 0) { + final long totalTimeInNanos = getTotalTimeInNanos(requestTimesIncludingRetries); + if (totalTimeInNanos == 0) { logger.warn("Expected HttpRequestTime to be tracked for request [{}] but found no count.", request); } else { - s3RepositoriesMetrics.common().httpRequestTimeInMillisHistogram().record(totalTimeInMillis, attributes); + s3RepositoriesMetrics.common() + .httpRequestTimeInMillisHistogram() + .record(TimeUnit.NANOSECONDS.toMillis(totalTimeInNanos), attributes); } } @@ -271,19 +273,20 @@ private static long getCountForMetric(TimingInfo info, AWSRequestMetrics.Field f } } - private static long getTotalTimeInMillis(List requestTimesIncludingRetries) { - // Here we calculate the timing in Milliseconds for the sum of the individual subMeasurements with the goal of deriving the TTFB - // (time to first byte). We calculate the time in millis for later use with an APM style counter (exposed as a long), rather than - // using the default double exposed by getTimeTakenMillisIfKnown(). We don't need sub-millisecond precision. So no need perform - // the data type castings. - long totalTimeInMillis = 0; + private static long getTotalTimeInNanos(List requestTimesIncludingRetries) { + // Here we calculate the timing in Nanoseconds for the sum of the individual subMeasurements with the goal of deriving the TTFB + // (time to first byte). We use high precision time here to tell from the case when request time metric is missing (0). + // The time is converted to milliseconds for later use with an APM style counter (exposed as a long), rather than using the + // default double exposed by getTimeTakenMillisIfKnown(). + // We don't need sub-millisecond precision. So no need perform the data type castings. 
+ long totalTimeInNanos = 0; for (TimingInfo timingInfo : requestTimesIncludingRetries) { var endTimeInNanos = timingInfo.getEndTimeNanoIfKnown(); if (endTimeInNanos != null) { - totalTimeInMillis += TimeUnit.NANOSECONDS.toMillis(endTimeInNanos - timingInfo.getStartTimeNano()); + totalTimeInNanos += endTimeInNanos - timingInfo.getStartTimeNano(); } } - return totalTimeInMillis; + return totalTimeInNanos; } @Override From 1698430bbb56875d2aab9e54a3b59e982086434c Mon Sep 17 00:00:00 2001 From: elasticsearchmachine Date: Mon, 5 Aug 2024 06:12:12 +0000 Subject: [PATCH 31/36] [Automated] Update Lucene snapshot to 9.12.0-snapshot-65be22a6221 --- gradle/verification-metadata.xml | 48 ++++++++++++++++---------------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index 4389c7751575e..c3dcdbe4ea584 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -2811,122 +2811,122 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + From bf7be8e23a8dd89bc8c36e82b812b728cddeb8f0 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 5 Aug 2024 10:21:13 +0200 Subject: [PATCH 32/36] Save 400 LoC in tests by using indexSettings shortcut (#111573) It's in the title, randomly saw a bunch of spots where we're not using the shortcut, figured I'd clean this up quickly to save ~400 lines. --- ...riesAggregationsUnlimitedDimensionsIT.java | 7 +- .../TimeSeriesNestedAggregationsIT.java | 7 +- .../datastreams/DataStreamAutoshardingIT.java | 18 +---- .../TSDBPassthroughIndexingIT.java | 10 +-- .../DataStreamLifecycleServiceIT.java | 5 +- .../DataStreamLifecycleServiceTests.java | 22 +++---- .../upgrades/FullClusterRestartIT.java | 17 +---- .../elasticsearch/backwards/IndexingIT.java | 34 ++-------- .../elasticsearch/backwards/RareTermsIT.java | 5 +- .../SearchWithMinCompatibleSearchNodeIT.java | 10 +-- .../elasticsearch/upgrades/RecoveryIT.java | 41 +++--------- .../upgrades/SnapshotBasedRecoveryIT.java | 5 +- .../admin/cluster/stats/ClusterStatsIT.java | 4 +- .../admin/indices/create/CreateIndexIT.java | 10 +-- .../admin/indices/rollover/RolloverIT.java | 3 +- .../bulk/BulkAfterWriteFsyncFailureIT.java | 9 +-- .../cluster/allocation/ClusterRerouteIT.java | 10 +-- .../allocation/decider/MockDiskUsagesIT.java | 16 ++--- .../discovery/DiscoveryDisruptionIT.java | 12 +--- .../gateway/RecoveryFromGatewayIT.java | 6 +- .../org/elasticsearch/get/GetActionIT.java | 9 +-- .../get/GetFromTranslogActionIT.java | 9 +-- .../get/ShardMultiGetFomTranslogActionIT.java | 9 +-- .../indices/recovery/IndexRecoveryIT.java | 5 +- .../indices/state/OpenCloseIndexIT.java | 5 +- .../aggregations/bucket/DateHistogramIT.java | 3 +- .../aggregations/bucket/DateRangeIT.java | 3 +- .../aggregations/bucket/DoubleTermsIT.java | 3 +- .../aggregations/bucket/HistogramIT.java | 3 +- .../aggregations/bucket/LongTermsIT.java | 4 +- .../search/aggregations/bucket/RangeIT.java | 3 +- .../SignificantTermsSignificanceScoreIT.java | 3 +- .../bucket/terms/RareTermsIT.java | 5 +- .../bucket/terms/StringTermsIT.java | 3 +- .../aggregations/metrics/ExtendedStatsIT.java | 4 +- .../metrics/HDRPercentileRanksIT.java | 4 +- .../metrics/HDRPercentilesIT.java | 4 +- .../metrics/MedianAbsoluteDeviationIT.java | 4 +- .../metrics/ScriptedMetricIT.java | 4 +- .../search/aggregations/metrics/StatsIT.java | 4 +- .../search/aggregations/metrics/SumIT.java | 4 +- .../metrics/TDigestPercentileRanksIT.java | 4 +- 
.../metrics/TDigestPercentilesIT.java | 4 +- .../aggregations/metrics/TopHitsIT.java | 4 +- .../aggregations/metrics/ValueCountIT.java | 4 +- .../aggregation/AggregationProfilerIT.java | 10 +-- .../snapshots/RepositoriesIT.java | 2 +- .../SharedClusterSnapshotRestoreIT.java | 4 +- .../snapshots/SnapshotStressTestsIT.java | 7 +- .../cluster/metadata/DataStreamTests.java | 11 +--- ...ailabilityHealthIndicatorServiceTests.java | 5 +- .../PersistedClusterStateServiceTests.java | 65 ++++--------------- .../snapshots/SnapshotResiliencyTests.java | 5 +- .../index/engine/EngineTestCase.java | 7 +- .../ESIndexLevelReplicationTestCase.java | 5 +- .../index/shard/IndexShardTestCase.java | 5 +- .../AbstractIndexRecoveryIntegTestCase.java | 21 +----- .../test/ESSingleNodeTestCase.java | 2 +- .../test/IndexSettingsModule.java | 8 +-- .../elasticsearch/xpack/ccr/AutoFollowIT.java | 45 +++---------- ...UntilTimeSeriesEndTimePassesStepTests.java | 9 +-- .../xpack/eql/qa/mixed_node/EqlSearchIT.java | 11 +--- .../action/AbstractPausableIntegTestCase.java | 7 +- .../ml/transforms/PainlessDomainSplitIT.java | 9 +-- .../SearchableSnapshotsResizeIntegTests.java | 7 +- .../authz/SnapshotUserRoleIntegTests.java | 2 +- .../xpack/sql/qa/mixed_node/SqlSearchIT.java | 11 +--- .../integration/TransformAuditorIT.java | 5 +- .../watcher/watch/WatchStoreUtilsTests.java | 5 +- 69 files changed, 132 insertions(+), 498 deletions(-) diff --git a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesAggregationsUnlimitedDimensionsIT.java b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesAggregationsUnlimitedDimensionsIT.java index 18b24123e6cf0..63f58bbb75713 100644 --- a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesAggregationsUnlimitedDimensionsIT.java +++ b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesAggregationsUnlimitedDimensionsIT.java @@ -17,7 +17,6 @@ import org.elasticsearch.aggregations.AggregationIntegTestCase; import org.elasticsearch.aggregations.bucket.timeseries.InternalTimeSeries; import org.elasticsearch.aggregations.bucket.timeseries.TimeSeriesAggregationBuilder; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.query.MatchAllQueryBuilder; @@ -102,15 +101,11 @@ private CreateIndexResponse prepareTimeSeriesIndex( final String[] routingDimensions ) { return prepareCreate("index").setSettings( - Settings.builder() - .put("mode", "time_series") + indexSettings(randomIntBetween(1, 3), randomIntBetween(1, 3)).put("mode", "time_series") .put("routing_path", String.join(",", routingDimensions)) - .put("index.number_of_shards", randomIntBetween(1, 3)) - .put("index.number_of_replicas", randomIntBetween(1, 3)) .put("time_series.start_time", startMillis) .put("time_series.end_time", endMillis) .put(MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING.getKey(), 4192) - .build() ).setMapping(mapping).get(); } diff --git a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesNestedAggregationsIT.java b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesNestedAggregationsIT.java index 3287f50ab1739..2967e6f5e322f 100644 --- 
a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesNestedAggregationsIT.java +++ b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesNestedAggregationsIT.java @@ -16,7 +16,6 @@ import org.elasticsearch.aggregations.AggregationIntegTestCase; import org.elasticsearch.aggregations.bucket.timeseries.InternalTimeSeries; import org.elasticsearch.aggregations.bucket.timeseries.TimeSeriesAggregationBuilder; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.query.MatchAllQueryBuilder; @@ -103,15 +102,11 @@ private CreateIndexResponse prepareTimeSeriesIndex( final String[] routingDimensions ) { return prepareCreate("index").setSettings( - Settings.builder() - .put("mode", "time_series") + indexSettings(randomIntBetween(1, 3), randomIntBetween(1, 3)).put("mode", "time_series") .put("routing_path", String.join(",", routingDimensions)) - .put("index.number_of_shards", randomIntBetween(1, 3)) - .put("index.number_of_replicas", randomIntBetween(1, 3)) .put("time_series.start_time", startMillis) .put("time_series.end_time", endMillis) .put(MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING.getKey(), 4192) - .build() ).setMapping(mapping).get(); } diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamAutoshardingIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamAutoshardingIT.java index a4c9a9d3e1c67..dbae2ec660181 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamAutoshardingIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamAutoshardingIT.java @@ -112,11 +112,7 @@ public void resetClusterSetting() { public void testRolloverOnAutoShardCondition() throws Exception { final String dataStreamName = "logs-es"; - putComposableIndexTemplate( - "my-template", - List.of("logs-*"), - Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 3).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build() - ); + putComposableIndexTemplate("my-template", List.of("logs-*"), indexSettings(3, 0).build()); final var createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); assertAcked(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet()); @@ -277,11 +273,7 @@ public void testReduceShardsOnRollover() throws IOException { final String dataStreamName = "logs-es"; // start with 3 shards - putComposableIndexTemplate( - "my-template", - List.of("logs-*"), - Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 3).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build() - ); + putComposableIndexTemplate("my-template", List.of("logs-*"), indexSettings(3, 0).build()); final var createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); assertAcked(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet()); @@ -391,11 +383,7 @@ public void testReduceShardsOnRollover() throws IOException { public void testLazyRolloverKeepsPreviousAutoshardingDecision() throws IOException { final String dataStreamName = "logs-es"; - putComposableIndexTemplate( - "my-template", - List.of("logs-*"), - Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 
3).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build() - ); + putComposableIndexTemplate("my-template", List.of("logs-*"), indexSettings(3, 0).build()); final var createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); assertAcked(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet()); diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBPassthroughIndexingIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBPassthroughIndexingIT.java index b8d7d18dec475..64e4cc2ba9577 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBPassthroughIndexingIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBPassthroughIndexingIT.java @@ -142,10 +142,7 @@ protected Settings nodeSettings() { } public void testIndexingGettingAndSearching() throws Exception { - var templateSettings = Settings.builder() - .put("index.mode", "time_series") - .put("index.number_of_shards", randomIntBetween(2, 10)) - .put("index.number_of_replicas", 0); + var templateSettings = indexSettings(randomIntBetween(2, 10), 0).put("index.mode", "time_series"); var request = new TransportPutComposableIndexTemplateAction.Request("id"); request.indexTemplate( @@ -218,10 +215,7 @@ public void testIndexingGettingAndSearching() throws Exception { public void testIndexingGettingAndSearchingShrunkIndex() throws Exception { String dataStreamName = "k8s"; - var templateSettings = Settings.builder() - .put("index.mode", "time_series") - .put("index.number_of_shards", 8) - .put("index.number_of_replicas", 0); + var templateSettings = indexSettings(8, 0).put("index.mode", "time_series"); var request = new TransportPutComposableIndexTemplateAction.Request("id"); request.indexTemplate( diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java index e7dfdcdaffa05..b71a545207cbe 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java @@ -416,10 +416,7 @@ public void testAutomaticForceMerge() throws Exception { "id1", null, List.of(dataStreamName + "*"), - Settings.builder() - .put("index.number_of_replicas", 1) - .put("index.number_of_shards", 1) - .put(MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey(), ONE_HUNDRED_MB) + indexSettings(1, 1).put(MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey(), ONE_HUNDRED_MB) .put(MergePolicyConfig.INDEX_MERGE_POLICY_MERGE_FACTOR_SETTING.getKey(), TARGET_MERGE_FACTOR_VALUE) .build(), null, diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java index c965eb2ba2536..6cd9ce1e152c8 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java @@ -1392,13 +1392,7 @@ public void 
testTimeSeriesIndicesStillWithinTimeBounds() { { // non time_series indices are not within time bounds (they don't have any) IndexMetadata indexMeta = IndexMetadata.builder(randomAlphaOfLengthBetween(10, 30)) - .settings( - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) - .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current()) - .build() - ) + .settings(indexSettings(1, 1).put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current())) .build(); Metadata newMetadata = Metadata.builder(clusterState.metadata()).put(indexMeta, true).build(); @@ -1596,12 +1590,14 @@ private ClusterState createClusterState(String indexName, Map cu var routingTableBuilder = RoutingTable.builder(); Metadata.Builder metadataBuilder = Metadata.builder(); Map indices = new HashMap<>(); - Settings indexSettings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), randomIntBetween(1, 10)) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), randomIntBetween(0, 3)) - .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current()) - .build(); - IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(indexName).version(randomLong()).settings(indexSettings); + IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(indexName) + .version(randomLong()) + .settings( + indexSettings(randomIntBetween(1, 10), randomIntBetween(0, 3)).put( + IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), + IndexVersion.current() + ) + ); if (customDataStreamLifecycleMetadata != null) { indexMetadataBuilder.putCustom(LIFECYCLE_CUSTOM_INDEX_METADATA_KEY, customDataStreamLifecycleMetadata); } diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 1a86947acab95..20c13ca92f5c1 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -920,9 +920,7 @@ public void testEmptyShard() throws IOException { final String indexName = "test_empty_shard"; if (isRunningAgainstOldCluster()) { - Settings.Builder settings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) + Settings.Builder settings = indexSettings(1, 1) // if the node with the replica is the first to be restarted, while a replica is still recovering // then delayed allocation will kick in. 
When the node comes back, the master will search for a copy // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN @@ -1522,14 +1520,7 @@ public void testOperationBasedRecovery() throws Exception { */ public void testTurnOffTranslogRetentionAfterUpgraded() throws Exception { if (isRunningAgainstOldCluster()) { - createIndex( - index, - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) - .build() - ); + createIndex(index, indexSettings(1, 1).put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true).build()); ensureGreen(index); int numDocs = randomIntBetween(10, 100); for (int i = 0; i < numDocs; i++) { @@ -1549,9 +1540,7 @@ public void testTurnOffTranslogRetentionAfterUpgraded() throws Exception { public void testResize() throws Exception { int numDocs; if (isRunningAgainstOldCluster()) { - final Settings.Builder settings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 3) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1); + final Settings.Builder settings = indexSettings(3, 1); if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false); } diff --git a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java index 6c924fe8e429a..825a866cdf2f8 100644 --- a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java +++ b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java @@ -13,7 +13,6 @@ import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; -import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.support.XContentMapValues; @@ -75,10 +74,7 @@ public void testIndexVersionPropagation() throws Exception { logger.info("cluster discovered: {}", nodes.toString()); final List<String> bwcNamesList = nodes.getBWCNodes().stream().map(MixedClusterTestNode::nodeName).collect(Collectors.toList()); final String bwcNames = bwcNamesList.stream().collect(Collectors.joining(",")); - Settings.Builder settings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2) - .put("index.routing.allocation.include._name", bwcNames); + Settings.Builder settings = indexSettings(1, 2).put("index.routing.allocation.include._name", bwcNames); final String index = "indexversionprop"; final int minUpdates = 5; final int maxUpdates = 10; @@ -165,10 +161,7 @@ public void testSeqNoCheckpoints() throws Exception { logger.info("cluster discovered: {}", nodes.toString()); final List<String> bwcNamesList = nodes.getBWCNodes().stream().map(MixedClusterTestNode::nodeName).collect(Collectors.toList()); final String bwcNames = bwcNamesList.stream().collect(Collectors.joining(",")); - Settings.Builder settings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2) - .put("index.routing.allocation.include._name", bwcNames); + Settings.Builder settings = indexSettings(1, 
2).put("index.routing.allocation.include._name", bwcNames); final String index = "test"; createIndex(index, settings.build()); @@ -251,10 +244,7 @@ public void testUpdateSnapshotStatus() throws Exception { String bwcNames = nodes.getBWCNodes().stream().map(MixedClusterTestNode::nodeName).collect(Collectors.joining(",")); // Allocating shards on the BWC nodes to make sure that taking the snapshot happens on those nodes. - Settings.Builder settings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), between(5, 10)) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) - .put("index.routing.allocation.include._name", bwcNames); + Settings.Builder settings = indexSettings(between(5, 10), 1).put("index.routing.allocation.include._name", bwcNames); final String index = "test-snapshot-index"; createIndex(index, settings.build()); @@ -315,14 +305,7 @@ public void testSyncedFlushTransition() throws Exception { int numOfReplicas = randomIntBetween(0, nodes.getNewNodes().size() - 1); int totalShards = numShards * (numOfReplicas + 1); final String index = "test_synced_flush"; - createIndex( - index, - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), numShards) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numOfReplicas) - .put("index.routing.allocation.include._name", newNodes) - .build() - ); + createIndex(index, indexSettings(numShards, numOfReplicas).put("index.routing.allocation.include._name", newNodes).build()); ensureGreen(index); indexDocs(index, randomIntBetween(0, 100), between(1, 100)); try ( @@ -394,14 +377,7 @@ public void testFlushTransition() throws Exception { int numOfReplicas = randomIntBetween(0, nodes.getNewNodes().size() - 1); int totalShards = numShards * (numOfReplicas + 1); final String index = "test_flush"; - createIndex( - index, - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), numShards) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numOfReplicas) - .put("index.routing.allocation.include._name", newNodes) - .build() - ); + createIndex(index, indexSettings(numShards, numOfReplicas).put("index.routing.allocation.include._name", newNodes).build()); ensureGreen(index); indexDocs(index, randomIntBetween(0, 100), between(1, 100)); try ( diff --git a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/RareTermsIT.java b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/RareTermsIT.java index a33fc01d8446a..f2ca41e5ef8bc 100644 --- a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/RareTermsIT.java +++ b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/RareTermsIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; -import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Strings; @@ -43,9 +42,7 @@ private int indexDocs(int numDocs, int id) throws Exception { } public void testSingleValuedString() throws Exception { - final Settings.Builder settings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 2) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0); + final Settings.Builder settings = indexSettings(2, 0); createIndex(index, settings.build()); // We want to trigger the usage of cuckoo filters that happen only when there are // more than 10k distinct values in one shard.
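For reference, every hunk in this patch applies the same mechanical rewrite: a hand-rolled Settings.builder() chain that spells out the shard and replica setting keys is collapsed into a call to the indexSettings(shards, replicas) helper from the shared test base class. A minimal sketch of what that helper presumably boils down to, inferred from the call sites in the hunks above (the wrapper class and the example values here are illustrative, not the real test-framework code):

import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.settings.Settings;

class IndexSettingsShortcutSketch {
    // Returns a builder pre-populated with the two settings the old call sites
    // spelled out by hand ("index.number_of_shards" / "index.number_of_replicas").
    static Settings.Builder indexSettings(int shards, int replicas) {
        return Settings.builder()
            .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, shards)
            .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, replicas);
    }

    static void example() {
        // Before: the long form removed throughout this patch.
        Settings.Builder before = Settings.builder()
            .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
            .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2);

        // After: one call; the returned builder stays chainable, e.g. with the
        // allocation filtering used in the IndexingIT hunks above ("bwc-node-0"
        // is a made-up node name for illustration).
        Settings.Builder after = indexSettings(1, 2).put("index.routing.allocation.include._name", "bwc-node-0");
    }
}

Because the helper returns the Settings.Builder rather than a built Settings object, call sites can keep chaining .put(...) calls, which is what lets the multi-line builder blocks collapse to a single line throughout the diff.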
diff --git a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/SearchWithMinCompatibleSearchNodeIT.java b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/SearchWithMinCompatibleSearchNodeIT.java index 461b731e518fb..808ebb764768f 100644 --- a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/SearchWithMinCompatibleSearchNodeIT.java +++ b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/SearchWithMinCompatibleSearchNodeIT.java @@ -12,8 +12,6 @@ import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; -import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Strings; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.rest.ESRestTestCase; @@ -50,13 +48,7 @@ public void prepareTestData() throws IOException { allNodes.addAll(nodes.getNewNodes()); if (client().performRequest(new Request("HEAD", "/" + index)).getStatusLine().getStatusCode() == 404) { - createIndex( - index, - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), numShards) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) - .build() - ); + createIndex(index, indexSettings(numShards, numReplicas).build()); for (int i = 0; i < numDocs; i++) { Request request = new Request("PUT", index + "/_doc/" + i); request.setJsonEntity("{\"test\": \"test_" + randomAlphaOfLength(2) + "\"}"); diff --git a/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java index 43d5ea842f9ef..fe2236adc4904 100644 --- a/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java @@ -60,9 +60,7 @@ public class RecoveryIT extends AbstractRollingTestCase { public void testHistoryUUIDIsGenerated() throws Exception { final String index = "index_history_uuid"; if (CLUSTER_TYPE == ClusterType.OLD) { - Settings.Builder settings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) + Settings.Builder settings = indexSettings(1, 1) // if the node with the replica is the first to be restarted, while a replica is still recovering // then delayed allocation will kick in. When the node comes back, the master will search for a copy // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN @@ -128,9 +126,7 @@ public void testRecoveryWithConcurrentIndexing() throws Exception { switch (CLUSTER_TYPE) { case OLD -> { - Settings.Builder settings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2) + Settings.Builder settings = indexSettings(1, 2) // if the node with the replica is the first to be restarted, while a replica is still recovering // then delayed allocation will kick in. 
When the node comes back, the master will search for a copy // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN @@ -217,9 +213,7 @@ public void testRelocationWithConcurrentIndexing() throws Exception { final String index = "relocation_with_concurrent_indexing"; switch (CLUSTER_TYPE) { case OLD -> { - Settings.Builder settings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2) + Settings.Builder settings = indexSettings(1, 2) // if the node with the replica is the first to be restarted, while a replica is still recovering // then delayed allocation will kick in. When the node comes back, the master will search for a copy // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN @@ -296,9 +290,7 @@ public void testRelocationWithConcurrentIndexing() throws Exception { public void testRecovery() throws Exception { final String index = "test_recovery"; if (CLUSTER_TYPE == ClusterType.OLD) { - Settings.Builder settings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) + Settings.Builder settings = indexSettings(1, 1) // if the node with the replica is the first to be restarted, while a replica is still recovering // then delayed allocation will kick in. When the node comes back, the master will search for a copy // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN @@ -413,9 +405,7 @@ public void testRecoveryClosedIndex() throws Exception { if (CLUSTER_TYPE == ClusterType.OLD) { createIndex( indexName, - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) + indexSettings(1, 1) // if the node with the replica is the first to be restarted, while a replica is still recovering // then delayed allocation will kick in. 
When the node comes back, the master will search for a copy // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN @@ -453,13 +443,7 @@ public void testCloseIndexDuringRollingUpgrade() throws Exception { final String indexName = String.join("_", "index", CLUSTER_TYPE.toString(), Integer.toString(id)).toLowerCase(Locale.ROOT); if (indexExists(indexName) == false) { - createIndex( - indexName, - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) - .build() - ); + createIndex(indexName, indexSettings(1, 0).build()); ensureGreen(indexName); closeIndex(indexName); } @@ -482,10 +466,7 @@ public void testClosedIndexNoopRecovery() throws Exception { if (CLUSTER_TYPE == ClusterType.OLD) { createIndex( indexName, - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) - .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none") + indexSettings(1, 0).put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none") .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "24h") .put("index.routing.allocation.include._name", CLUSTER_NAME + "-0") .build() @@ -578,9 +559,7 @@ private void assertClosedIndex(final String index, final boolean checkRoutingTab public void testUpdateDoc() throws Exception { final String index = "test_update_doc"; if (CLUSTER_TYPE == ClusterType.OLD) { - Settings.Builder settings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2); + Settings.Builder settings = indexSettings(1, 2); createIndex(index, settings.build()); indexDocs(index, 0, 100); } @@ -648,9 +627,7 @@ private void assertNoopRecoveries(String indexName, Predicate targetNode public void testOperationBasedRecovery() throws Exception { final String index = "test_operation_based_recovery"; if (CLUSTER_TYPE == ClusterType.OLD) { - final Settings.Builder settings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2); + final Settings.Builder settings = indexSettings(1, 2); if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); } diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java index 593630546845d..2ee668c0e3fe1 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java @@ -59,10 +59,7 @@ public void testSnapshotBasedRecovery() throws Exception { final String repositoryName = "snapshot_based_recovery_repo"; final int numDocs = 200; if (isOldCluster()) { - Settings.Builder settings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) - .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms") + Settings.Builder settings = indexSettings(1, 0).put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms") 
.put(SETTING_ALLOCATION_MAX_RETRY.getKey(), "0"); // fail faster createIndex(indexName, settings.build()); ensureGreen(indexName); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java index 1fda9c67a0beb..0c3dac0f99b6c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java @@ -161,7 +161,7 @@ public void testIndicesShardStats() throws ExecutionException, InterruptedExcept ClusterStatsResponse response = clusterAdmin().prepareClusterStats().get(); assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN)); - prepareCreate("test1").setSettings(Settings.builder().put("number_of_shards", 2).put("number_of_replicas", 1)).get(); + prepareCreate("test1").setSettings(indexSettings(2, 1)).get(); response = clusterAdmin().prepareClusterStats().get(); assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.YELLOW)); @@ -179,7 +179,7 @@ public void testIndicesShardStats() throws ExecutionException, InterruptedExcept assertThat(response.indicesStats.getDocs().getCount(), Matchers.equalTo(1L)); assertShardStats(response.getIndicesStats().getShards(), 1, 4, 2, 1.0); - prepareCreate("test2").setSettings(Settings.builder().put("number_of_shards", 3).put("number_of_replicas", 0)).get(); + prepareCreate("test2").setSettings(indexSettings(3, 0)).get(); ensureGreen(); response = clusterAdmin().prepareClusterStats().get(); assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java index 26a430123ccd9..27f0cd408e7fb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java @@ -306,10 +306,7 @@ public void testRestartIndexCreationAfterFullClusterRestart() throws Exception { public void testFailureToCreateIndexCleansUpIndicesService() { final int numReplicas = internalCluster().numDataNodes(); - Settings settings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), numReplicas) - .build(); + Settings settings = indexSettings(1, numReplicas).build(); assertAcked(indicesAdmin().prepareCreate("test-idx-1").setSettings(settings).addAlias(new Alias("alias1").writeIndex(true)).get()); ActionRequestBuilder builder = indicesAdmin().prepareCreate("test-idx-2") @@ -328,10 +325,7 @@ public void testFailureToCreateIndexCleansUpIndicesService() { */ public void testDefaultWaitForActiveShardsUsesIndexSetting() throws Exception { final int numReplicas = internalCluster().numDataNodes(); - Settings settings = Settings.builder() - .put(SETTING_WAIT_FOR_ACTIVE_SHARDS.getKey(), Integer.toString(numReplicas)) - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), numReplicas) + Settings settings = indexSettings(1, numReplicas).put(SETTING_WAIT_FOR_ACTIVE_SHARDS.getKey(), Integer.toString(numReplicas)) .build(); 
assertAcked(indicesAdmin().prepareCreate("test-idx-1").setSettings(settings).get()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java index a568424300e75..16f8f51cb8aae 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java @@ -211,9 +211,8 @@ public void testRolloverWithIndexSettingsWithoutPrefix() throws Exception { assertAcked(prepareCreate("test_index-2").addAlias(testAlias).get()); indexDoc("test_index-2", "1", "field", "value"); flush("test_index-2"); - final Settings settings = Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0).build(); final RolloverResponse response = indicesAdmin().prepareRolloverIndex("test_alias") - .settings(settings) + .settings(indexSettings(1, 0).build()) .alias(new Alias("extra_alias")) .get(); assertThat(response.getOldIndex(), equalTo("test_index-2")); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkAfterWriteFsyncFailureIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkAfterWriteFsyncFailureIT.java index d531686bb5207..0ed585164750a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkAfterWriteFsyncFailureIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkAfterWriteFsyncFailureIT.java @@ -10,7 +10,6 @@ import org.apache.lucene.tests.mockfile.FilterFileChannel; import org.apache.lucene.tests.mockfile.FilterFileSystemProvider; -import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.PathUtils; import org.elasticsearch.core.PathUtilsForTesting; @@ -59,13 +58,7 @@ public void testFsyncFailureDoesNotAdvanceLocalCheckpoints() { client().admin() .indices() .prepareCreate(indexName) - .setSettings( - Settings.builder() - .put(INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .build() - ) + .setSettings(indexSettings(1, 0).put(INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1)) .setMapping("key", "type=keyword", "val", "type=long") .get(); ensureGreen(indexName); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java index 7c13171ea76ad..dc93aaa814018 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java @@ -19,7 +19,6 @@ import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.health.ClusterHealthStatus; -import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; @@ -395,14 +394,7 @@ public void testMessageLogging() { assertThat(healthResponse.isTimedOut(), equalTo(false)); final String indexName = "test_index"; - indicesAdmin().prepareCreate(indexName) - 
.setWaitForActiveShards(ActiveShardCount.NONE) - .setSettings( - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 2) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) - ) - .get(); + indicesAdmin().prepareCreate(indexName).setWaitForActiveShards(ActiveShardCount.NONE).setSettings(indexSettings(2, 1)).get(); try (var dryRunMockLog = MockLog.capture(TransportClusterRerouteAction.class)) { dryRunMockLog.addExpectation( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java index fd5e54631fd7a..7464f83cb2814 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java @@ -106,7 +106,7 @@ public void testRerouteOccursOnDiskPassingHighWatermark() throws Exception { } updateClusterSettings(settings); // Create an index with 10 shards so we can check allocation for it - assertAcked(prepareCreate("test").setSettings(Settings.builder().put("number_of_shards", 10).put("number_of_replicas", 0))); + assertAcked(prepareCreate("test").setSettings(indexSettings(10, 0))); ensureGreen("test"); assertBusy(() -> { @@ -184,7 +184,7 @@ public void testAutomaticReleaseOfIndexBlock() throws Exception { updateClusterSettings(builder); // Create an index with 6 shards so we can check allocation for it - prepareCreate("test").setSettings(Settings.builder().put("number_of_shards", 6).put("number_of_replicas", 0)).get(); + prepareCreate("test").setSettings(indexSettings(6, 0)).get(); ensureGreen("test"); { @@ -269,7 +269,7 @@ public void testOnlyMovesEnoughShardsToDropBelowHighWatermark() throws Exception .map(RoutingNode::nodeId) .toList(); - assertAcked(prepareCreate("test").setSettings(Settings.builder().put("number_of_shards", 6).put("number_of_replicas", 0))); + assertAcked(prepareCreate("test").setSettings(indexSettings(6, 0))); ensureGreen("test"); @@ -355,10 +355,10 @@ public void testDoesNotExceedLowWatermarkWhenRebalancing() throws Exception { assertAcked( prepareCreate("test").setSettings( - Settings.builder() - .put("number_of_shards", 6) - .put("number_of_replicas", 0) - .put(IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getConcreteSettingForNamespace("_id").getKey(), nodeIds.get(2)) + indexSettings(6, 0).put( + IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getConcreteSettingForNamespace("_id").getKey(), + nodeIds.get(2) + ) ) ); ensureGreen("test"); @@ -422,7 +422,7 @@ public void testMovesShardsOffSpecificDataPathAboveWatermark() throws Exception .map(RoutingNode::nodeId) .toList(); - assertAcked(prepareCreate("test").setSettings(Settings.builder().put("number_of_shards", 6).put("number_of_replicas", 0))); + assertAcked(prepareCreate("test").setSettings(indexSettings(6, 0))); ensureGreen("test"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java index 66fc6e0236b53..cad5c8f524bc7 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java @@ -17,7 +17,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; 
import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.breaker.CircuitBreaker; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.ServiceDisruptionScheme; @@ -31,9 +30,6 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; -import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING; -import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING; - /** * Tests for discovery during disruptions. */ @@ -136,13 +132,7 @@ public void testElectMasterWithLatestVersion() throws Exception { internalCluster().setDisruptionScheme(isolatePreferredMaster); isolatePreferredMaster.startDisrupting(); - client(randomFrom(nonPreferredNodes)).admin() - .indices() - .prepareCreate("test") - .setSettings( - Settings.builder().put(INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1).put(INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) - ) - .get(); + client(randomFrom(nonPreferredNodes)).admin().indices().prepareCreate("test").setSettings(indexSettings(1, 0)).get(); internalCluster().clearDisruptionScheme(false); internalCluster().setDisruptionScheme(isolateAllNodes); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java index 3baabe4cc888e..26573644790fa 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java @@ -441,13 +441,9 @@ public void testReuseInFileBasedPeerRecovery() throws Exception { .indices() .prepareCreate("test") .setSettings( - Settings.builder() - .put("number_of_shards", 1) - .put("number_of_replicas", 1) - + indexSettings(1, 1) // disable merges to keep segments the same .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) - // expire retention leases quickly .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms") ) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java index a96801b707808..76c501df1fd29 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java @@ -22,7 +22,6 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastResponse; -import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; @@ -849,13 +848,7 @@ public void testAvoidWrappingSearcherInMultiGet() { SearcherWrapperPlugin.enabled = true; assertAcked( prepareCreate("test").setMapping("f", "type=keyword") - .setSettings( - Settings.builder() - .put("index.refresh_interval", "-1") - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put("index.routing.rebalance.enable", "none") - ) + .setSettings(indexSettings(1, 0).put("index.refresh_interval", "-1").put("index.routing.rebalance.enable", "none")) ); // start tracking translog 
locations in the live version map { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/get/GetFromTranslogActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/get/GetFromTranslogActionIT.java index 224f0dd4dc822..a77c01e199942 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/get/GetFromTranslogActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/get/GetFromTranslogActionIT.java @@ -13,9 +13,7 @@ import org.elasticsearch.action.get.TransportGetFromTranslogAction; import org.elasticsearch.action.get.TransportGetFromTranslogAction.Response; import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -34,11 +32,8 @@ public void testGetFromTranslog() throws Exception { assertAcked( prepareCreate(INDEX).setMapping("field1", "type=keyword,store=true") .setSettings( - Settings.builder() - .put("index.refresh_interval", -1) - // A GetFromTranslogAction runs only Stateless where there is only one active indexing shard. - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + // A GetFromTranslogAction runs only Stateless where there is only one active indexing shard. + indexSettings(1, 0).put("index.refresh_interval", -1) ) .addAlias(new Alias(ALIAS).writeIndex(randomFrom(true, false, null))) ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/get/ShardMultiGetFomTranslogActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/get/ShardMultiGetFomTranslogActionIT.java index b09a0284eba05..6da2802ce7cc8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/get/ShardMultiGetFomTranslogActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/get/ShardMultiGetFomTranslogActionIT.java @@ -15,9 +15,7 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.WriteRequest; -import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -42,11 +40,8 @@ public class ShardMultiGetFomTranslogActionIT extends ESIntegTestCase { public void testShardMultiGetFromTranslog() throws Exception { assertAcked( prepareCreate(INDEX).setSettings( - Settings.builder() - .put("index.refresh_interval", -1) - // A ShardMultiGetFromTranslogAction runs only Stateless where there is only one active indexing shard. - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + // A ShardMultiGetFromTranslogAction runs only Stateless where there is only one active indexing shard. 
+ indexSettings(1, 0).put("index.refresh_interval", -1) ).addAlias(new Alias(ALIAS).writeIndex(randomFrom(true, false, null))) ); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index d56e4a372c17c..fbbeec4b4e9ba 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -914,10 +914,7 @@ private IndicesStatsResponse createAndPopulateIndex(String name, int nodeCount, prepareCreate( name, nodeCount, - Settings.builder() - .put("number_of_shards", shardCount) - .put("number_of_replicas", replicaCount) - .put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), 0) + indexSettings(shardCount, replicaCount).put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), 0) ) ); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java index 5201e4ab3d812..a7a2af57ef810 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java @@ -227,10 +227,7 @@ public void testCloseOpenAliasMultipleIndices() { public void testOpenWaitingForActiveShardsFailed() throws Exception { Client client = client(); - Settings settings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) - .build(); + Settings settings = indexSettings(1, 0).build(); assertAcked(client.admin().indices().prepareCreate("test").setSettings(settings).get()); assertAcked(client.admin().indices().prepareClose("test").get()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java index efb283f047bb2..b83b74ca8b639 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java @@ -1549,8 +1549,7 @@ public void testDSTEndTransition() throws Exception { */ public void testScriptCaching() throws Exception { assertAcked( - prepareCreate("cache_test_idx").setMapping("d", "type=date") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + prepareCreate("cache_test_idx").setMapping("d", "type=date").setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); String date = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(date(1, 1)); String date2 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(date(2, 1)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java index 0b92372652597..9df1fae2431f0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; @@ -616,7 +615,7 @@ public void testNoRangesInQuery() { public void testScriptCaching() throws Exception { assertAcked( prepareCreate("cache_test_idx").setMapping("date", "type=date") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + .setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); indexRandom( true, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java index 668b9d79c49a8..237f296f25751 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java @@ -9,7 +9,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Strings; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.query.QueryBuilders; @@ -942,7 +941,7 @@ public void testOtherDocCount() { public void testScriptCaching() throws Exception { assertAcked( prepareCreate("cache_test_idx").setMapping("d", "type=float") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + .setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); indexRandom( true, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java index 5894837e257bf..d117f593348d6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.MockScriptPlugin; @@ -1115,7 +1114,7 @@ public void testDecimalIntervalAndOffset() throws Exception { public void testScriptCaching() throws Exception { assertAcked( prepareCreate("cache_test_idx").setMapping("d", "type=float") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + .setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); indexRandom( true, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java index f0c5cbf9c76bb..cbb3850422800 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java @@ -9,7 +9,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Strings; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.query.QueryBuilders; @@ -899,8 +898,7 @@ public void testOtherDocCount() { */ public void testScriptCaching() throws Exception { assertAcked( - prepareCreate("cache_test_idx").setMapping("d", "type=long") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); indexRandom( true, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java index 10e3649e9f161..6a60969e632ee 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java @@ -9,7 +9,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; @@ -896,7 +895,7 @@ public void testEmptyAggregation() throws Exception { public void testScriptCaching() throws Exception { assertAcked( prepareCreate("cache_test_idx").setMapping("i", "type=integer") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + .setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); indexRandom( true, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java index 35a117ac8922b..5fa010e4b091e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; @@ -548,7 +547,7 @@ public void testReduceFromSeveralShards() throws IOException, ExecutionException public void testScriptCaching() throws Exception { assertAcked( prepareCreate("cache_test_idx").setMapping("s", "type=long", "t", "type=text") - 
.setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + .setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); indexRandom( true, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsIT.java index c45cabf425b14..69b84c6b98286 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; @@ -46,9 +45,7 @@ private void indexDocs(int numDocs) { } public void testSingleValuedString() { - final Settings.Builder settings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 2) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0); + final Settings.Builder settings = indexSettings(2, 0); createIndex(index, settings.build()); // We want to trigger the usage of cuckoo filters that happen only when there are // more than 10k distinct values in one shard. diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java index 662744ddfe77e..fa9a9ef2a7f41 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.mapper.IndexFieldMapper; import org.elasticsearch.index.query.QueryBuilders; @@ -1198,7 +1197,7 @@ public void testOtherDocCount() { public void testScriptCaching() throws Exception { assertAcked( prepareCreate("cache_test_idx").setMapping("d", "type=keyword") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + .setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); indexRandom( true, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java index 750868defde97..fde18fb283a6e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java @@ -7,7 +7,6 @@ */ package org.elasticsearch.search.aggregations.metrics; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import 
org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; @@ -905,8 +904,7 @@ private void checkUpperLowerBounds(ExtendedStats stats, double sigma) { */ public void testScriptCaching() throws Exception { assertAcked( - prepareCreate("cache_test_idx").setMapping("d", "type=long") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); indexRandom( true, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java index 13d66a5cf3949..a4da7c4e893be 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations.metrics; import org.apache.logging.log4j.LogManager; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; @@ -568,8 +567,7 @@ public void testOrderByEmptyAggregation() throws Exception { */ public void testScriptCaching() throws Exception { assertAcked( - prepareCreate("cache_test_idx").setMapping("d", "type=long") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); indexRandom( true, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java index cd69fb8241ef2..43e4aecb07f7f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations.metrics; import org.apache.logging.log4j.LogManager; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; @@ -541,8 +540,7 @@ public void testOrderByEmptyAggregation() throws Exception { */ public void testScriptCaching() throws Exception { assertAcked( - prepareCreate("cache_test_idx").setMapping("d", "type=long") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); indexRandom( true, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java index 6c80931914ac6..4ded290f93961 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; @@ -494,8 +493,7 @@ public void testOrderByEmptyAggregation() throws Exception { */ public void testScriptCaching() throws Exception { assertAcked( - prepareCreate("cache_test_idx").setMapping("d", "type=long") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); indexRandom( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java index 5fcc2cf858ab2..042c8c1fb0e35 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Strings; import org.elasticsearch.plugins.Plugin; @@ -1138,8 +1137,7 @@ public void testScriptCaching() throws Exception { Script ndRandom = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "return Math.random()", Collections.emptyMap()); assertAcked( - prepareCreate("cache_test_idx").setMapping("d", "type=long") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); indexRandom( true, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java index 84e0bee396c9d..78adca3377f0b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java @@ -9,7 +9,6 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; @@ -234,8 +233,7 @@ private void assertShardExecutionState(SearchResponse response, int expectedFail */ public void testScriptCaching() throws Exception { assertAcked( - prepareCreate("cache_test_idx").setMapping("d", "type=long") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); indexRandom( true, 
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/SumIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/SumIT.java index d50c101dbd5d1..fd173b8f48a12 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/SumIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/SumIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; @@ -208,8 +207,7 @@ public void testOrderByEmptyAggregation() throws Exception { */ public void testScriptCaching() throws Exception { assertAcked( - prepareCreate("cache_test_idx").setMapping("d", "type=long") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); indexRandom( true, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java index 9c737cb734f16..9c11b6cd14d54 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations.metrics; import org.apache.logging.log4j.LogManager; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; @@ -485,8 +484,7 @@ public void testOrderByEmptyAggregation() throws Exception { */ public void testScriptCaching() throws Exception { assertAcked( - prepareCreate("cache_test_idx").setMapping("d", "type=long") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); indexRandom( true, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java index 1c101324cd5fc..c67a237b2fc17 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations.metrics; import org.apache.logging.log4j.LogManager; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; @@ -457,8 +456,7 @@ public void testOrderByEmptyAggregation() throws Exception { */ public void testScriptCaching() throws Exception { assertAcked( - prepareCreate("cache_test_idx").setMapping("d", 
"type=long") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); indexRandom( true, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java index fc753b0844c46..42f04ff54c82a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java @@ -1082,9 +1082,7 @@ public void testScriptCaching() throws Exception { try { assertAcked( prepareCreate("cache_test_idx").setMapping("d", "type=long") - .setSettings( - Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1) - ) + .setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); indexRandom( true, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java index c3feff6f3eaaa..445ad8e0b9b11 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java @@ -7,7 +7,6 @@ */ package org.elasticsearch.search.aggregations.metrics; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; @@ -215,8 +214,7 @@ public void testMultiValuedScriptWithParams() throws Exception { */ public void testScriptCaching() throws Exception { assertAcked( - prepareCreate("cache_test_idx").setMapping("d", "type=long") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); indexRandom( true, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java index 0acf9be574ffe..81bb143c47729 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java @@ -93,7 +93,7 @@ protected int numberOfShards() { protected void setupSuiteScopeCluster() throws Exception { assertAcked( indicesAdmin().prepareCreate("idx") - .setSettings(Map.of("number_of_shards", 1, "number_of_replicas", 0)) + .setSettings(indexSettings(1, 0)) .setMapping(STRING_FIELD, "type=keyword", NUMBER_FIELD, "type=integer", TAG_FIELD, "type=keyword") ); List builders = new ArrayList<>(); @@ -634,11 +634,7 @@ public void testNoProfile() { * documents and that is hard to express in yaml. 
*/ public void testFilterByFilter() throws InterruptedException, IOException { - assertAcked( - indicesAdmin().prepareCreate("dateidx") - .setSettings(Map.of("number_of_shards", 1, "number_of_replicas", 0)) - .setMapping("date", "type=date") - ); + assertAcked(indicesAdmin().prepareCreate("dateidx").setSettings(indexSettings(1, 0)).setMapping("date", "type=date")); List builders = new ArrayList<>(); for (int i = 0; i < RangeAggregator.DOCS_PER_RANGE_TO_USE_FILTERS * 2; i++) { String date = Instant.ofEpochSecond(i).toString(); @@ -713,7 +709,7 @@ public void testDateHistogramFilterByFilterDisabled() throws InterruptedExceptio try { assertAcked( indicesAdmin().prepareCreate("date_filter_by_filter_disabled") - .setSettings(Map.of("number_of_shards", 1, "number_of_replicas", 0)) + .setSettings(indexSettings(1, 0)) .setMapping("date", "type=date", "keyword", "type=keyword") ); List builders = new ArrayList<>(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java index 057d7124f83d9..d4c0a4c80a3b5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java @@ -298,7 +298,7 @@ public void testRepositoryConflict() throws Exception { logger.info("--> snapshot"); final String index = "test-idx"; - assertAcked(prepareCreate(index, 1, Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0))); + assertAcked(prepareCreate(index, 1, indexSettings(1, 0))); for (int i = 0; i < 10; i++) { indexDoc(index, Integer.toString(i), "foo", "bar" + i); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index a651537c77539..531e9f4f45afa 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -1788,9 +1788,7 @@ public void testSnapshotCanceledOnRemovedShard() throws Exception { final String index = "test-idx"; final String snapshot = "test-snap"; - assertAcked( - prepareCreate(index, 1, Settings.builder().put("number_of_shards", numPrimaries).put("number_of_replicas", numReplicas)) - ); + assertAcked(prepareCreate(index, 1, indexSettings(numPrimaries, numReplicas))); indexRandomDocs(index, 100); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java index ca1b93502ade1..9c9076dff00e2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java @@ -29,7 +29,6 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; -import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.NodesShutdownMetadata; import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -1461,11 +1460,7 @@ private void createIndexAndContinue(Releasable releasable) { 
docPermits = new Semaphore(between(1000, 3000)); logger.info("--> create index [{}] with max [{}] docs", indexName, docPermits.availablePermits()); indicesAdmin().prepareCreate(indexName) - .setSettings( - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), shardCount) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), between(0, cluster.numDataNodes() - 1)) - ) + .setSettings(indexSettings(shardCount, between(0, cluster.numDataNodes() - 1))) .execute(mustSucceed(response -> { assertTrue(response.isAcknowledged()); logger.info("--> finished create index [{}]", indexName); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java index 07481a68c5176..0568f61e0dfd3 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java @@ -1039,11 +1039,7 @@ public void testValidate() { dataStream.validate( (index) -> IndexMetadata.builder(index) .settings( - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) - .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current()) - .build() + indexSettings(1, 1).put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current()).build() ) .build() ); @@ -1058,10 +1054,7 @@ public void testValidate() { () -> dataStream.validate( (index) -> IndexMetadata.builder(index) .settings( - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) - .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current()) + indexSettings(1, 1).put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current()) .put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) .put(IndexSettings.TIME_SERIES_START_TIME.getKey(), start3.toEpochMilli()) .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), end3.toEpochMilli()) diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorServiceTests.java index 916683c4a536a..0e3041dda9853 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorServiceTests.java @@ -2109,10 +2109,7 @@ private static List createIndexMetadataForIndexNameToPriorityMap( for (Map.Entry indexNameToPriority : indexNameToPriorityMap.entrySet()) { String indexName = indexNameToPriority.getKey(); IndexMetadata.Builder indexMetadataBuilder = new IndexMetadata.Builder(indexName); - Settings settings = Settings.builder() - .put(IndexMetadata.SETTING_PRIORITY, indexNameToPriority.getValue()) - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) + Settings settings = indexSettings(1, 1).put(IndexMetadata.SETTING_PRIORITY, indexNameToPriority.getValue()) .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current()) .build(); indexMetadataBuilder.settings(settings); diff --git 
a/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java b/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java index d2ad92320cada..ee5b4972c13ad 100644 --- a/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java @@ -726,10 +726,7 @@ public void testFailsIfIndexMetadataIsDuplicated() throws IOException { .version(1L) .putMapping(randomMappingMetadataOrNull()) .settings( - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) .put(IndexMetadata.SETTING_INDEX_UUID, indexUUID) ) ) @@ -797,10 +794,7 @@ public void testPersistsAndReloadsIndexMetadataIffVersionOrTermChanges() throws .version(indexMetadataVersion - 1) // -1 because it's incremented in .put() .putMapping(randomMappingMetadataOrNull()) .settings( - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) .put(IndexMetadata.SETTING_INDEX_UUID, indexUUID) ) ) @@ -931,10 +925,7 @@ public void testPersistsAndReloadsIndexMetadataForMultipleIndices() throws IOExc .putMapping(randomMappingMetadataOrNull()) .version(randomLongBetween(0L, Long.MAX_VALUE - 1) - 1) // -1 because it's incremented in .put() .settings( - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + indexSettings(1, 1).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) .put(IndexMetadata.SETTING_INDEX_UUID, updatedIndexUuid) ) ) @@ -943,10 +934,7 @@ public void testPersistsAndReloadsIndexMetadataForMultipleIndices() throws IOExc .putMapping(randomMappingMetadataOrNull()) .version(randomLongBetween(0L, Long.MAX_VALUE - 1) - 1) // -1 because it's incremented in .put() .settings( - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + indexSettings(1, 1).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) .put(IndexMetadata.SETTING_INDEX_UUID, deletedIndexUuid) ) ) @@ -990,10 +978,7 @@ public void testPersistsAndReloadsIndexMetadataForMultipleIndices() throws IOExc .version(randomLongBetween(0L, Long.MAX_VALUE - 1) - 1) // -1 because it's incremented in .put() .putMapping(randomMappingMetadataOrNull()) .settings( - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + indexSettings(1, 1).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) .put(IndexMetadata.SETTING_INDEX_UUID, addedIndexUuid) ) ) @@ -1040,10 +1025,7 @@ public void testReloadsMetadataAcrossMultipleSegments() throws IOException { IndexMetadata.builder(index.getName()) 
.putMapping(randomMappingMetadataOrNull()) .settings( - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) ) ) @@ -1074,10 +1056,7 @@ public void testHandlesShuffledDocuments() throws IOException { IndexMetadata.builder("test-" + i) .putMapping(randomMappingMetadataOrNull()) .settings( - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random())) ) ); @@ -1254,10 +1233,7 @@ public void testSlowLogging() throws IOException, IllegalAccessException { IndexMetadata.builder("test") .putMapping(randomMappingMetadata()) .settings( - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) .put(IndexMetadata.SETTING_INDEX_UUID, "test-uuid") ) ) @@ -1369,10 +1345,7 @@ public void testLimitsFileCount() throws IOException { IndexMetadata.builder("index-" + i) .putMapping(randomMappingMetadataOrNull()) .settings( - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random())) ) ) @@ -1592,10 +1565,7 @@ public void testFailsIfMappingIsDuplicated() throws IOException { IndexMetadata.builder("test-1") .putMapping(randomMappingMetadata()) .settings( - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random())) ) ) @@ -1647,10 +1617,7 @@ public void testFailsIfMappingIsMissing() throws IOException { IndexMetadata.builder("test-1") .putMapping(randomMappingMetadata()) .settings( - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random())) ) ) @@ -1713,10 +1680,7 @@ public void testDeduplicatedMappings() throws IOException { IndexMetadata.builder("test-" + i) .putMapping(mapping1) .settings( - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) - 
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random())) ) ); @@ -1740,10 +1704,7 @@ public void testDeduplicatedMappings() throws IOException { IndexMetadata.builder("test-" + 99) .putMapping(mapping2) .settings( - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random())) ) ); diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index 8dcb5ce355849..b54a786e05c9d 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -1816,10 +1816,7 @@ private void scheduleNow(Runnable runnable) { private static Settings defaultIndexSettings(int shards) { // TODO: randomize replica count settings once recovery operations aren't blocking anymore - return Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), shards) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) - .build(); + return indexSettings(shards, 0).build(); } private static void continueOrDie(SubscribableListener listener, CheckedConsumer onResponse) { diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index f383a1aaab12d..8412e9e250885 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -1400,12 +1400,7 @@ public static void assertAtMostOneLuceneDocumentPerSequenceNumber(IndexSettings public static MapperService createMapperService() throws IOException { IndexMetadata indexMetadata = IndexMetadata.builder("test") - .settings( - Settings.builder() - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) - ) + .settings(indexSettings(1, 1).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())) .putMapping("{\"properties\": {}}") .build(); MapperService mapperService = MapperTestUtils.newMapperService( diff --git a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index 6157d6997641d..13bdf2edb0f24 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -128,10 +128,7 @@ protected IndexMetadata buildIndexMetadata(int replicas, String mappings) throws } protected IndexMetadata buildIndexMetadata(int replicas, Settings indexSettings, String mappings) { - Settings settings = Settings.builder() - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) - 
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, replicas) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + Settings settings = indexSettings(1, replicas).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), between(0, 1000)) .put(indexSettings) .build(); diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 4d15bf9e3f943..bebaeced797db 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -301,10 +301,7 @@ protected IndexShard newShard( final IndexingOperationListener... listeners ) throws IOException { assert shardRouting.initializing() : shardRouting; - Settings indexSettings = Settings.builder() - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + Settings indexSettings = indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) .put( IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), randomBoolean() ? IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.get(Settings.EMPTY) : between(0, 1000) diff --git a/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java index c6c9f5b727980..16ddcb750dd6b 100644 --- a/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java @@ -107,12 +107,7 @@ protected void checkTransientErrorsDuringRecoveryAreRetried(String recoveryActio assertThat(response.isTimedOut(), is(false)); indicesAdmin().prepareCreate(indexName) - .setSettings( - Settings.builder() - .put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "color", "blue") - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - ) + .setSettings(indexSettings(1, 0).put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "color", "blue")) .get(); List requests = new ArrayList<>(); @@ -213,12 +208,7 @@ public void checkDisconnectsWhileRecovering(String recoveryActionToBlock) throws assertThat(response.isTimedOut(), is(false)); indicesAdmin().prepareCreate(indexName) - .setSettings( - Settings.builder() - .put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "color", "blue") - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - ) + .setSettings(indexSettings(1, 0).put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "color", "blue")) .get(); List requests = new ArrayList<>(); @@ -314,12 +304,7 @@ public void checkDisconnectsDuringRecovery(boolean useSnapshotBasedRecoveries) t final String redNodeName = internalCluster().startNode(Settings.builder().put("node.attr.color", "red").put(nodeSettings).build()); indicesAdmin().prepareCreate(indexName) - .setSettings( - Settings.builder() - .put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "color", "blue") - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - 
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - ) + .setSettings(indexSettings(1, 0).put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "color", "blue")) .get(); List requests = new ArrayList<>(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java index 2fc4d63d0120a..ee62c7f0b6ffd 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java @@ -99,7 +99,7 @@ protected void startNode(long seed) throws Exception { indicesAdmin().preparePutTemplate("one_shard_index_template") .setPatterns(Collections.singletonList("*")) .setOrder(0) - .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)) + .setSettings(indexSettings(1, 0)) .get(); indicesAdmin().preparePutTemplate("random-soft-deletes-template") .setPatterns(Collections.singletonList("*")) diff --git a/test/framework/src/main/java/org/elasticsearch/test/IndexSettingsModule.java b/test/framework/src/main/java/org/elasticsearch/test/IndexSettingsModule.java index a9250a6b327d2..7c7166839ebf4 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/IndexSettingsModule.java +++ b/test/framework/src/main/java/org/elasticsearch/test/IndexSettingsModule.java @@ -49,10 +49,8 @@ public static IndexSettings newIndexSettings(Index index, Settings settings, Set } public static IndexSettings newIndexSettings(Index index, Settings indexSetting, Settings nodeSettings, Setting... setting) { - Settings build = Settings.builder() + Settings build = ESTestCase.indexSettings(1, 1) .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(indexSetting) .build(); IndexMetadata metadata = IndexMetadata.builder(index.getName()) @@ -67,10 +65,8 @@ public static IndexSettings newIndexSettings(Index index, Settings indexSetting, } public static IndexSettings newIndexSettings(Index index, Settings settings, IndexScopedSettings indexScopedSettings) { - Settings build = Settings.builder() + Settings build = ESTestCase.indexSettings(1, 1) .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(settings) .build(); IndexMetadata metadata = IndexMetadata.builder(index.getName()) diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java index bbd1905374d24..5f4b740f150ef 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java @@ -103,10 +103,7 @@ public String getFeatureDescription() { } public void testAutoFollow() throws Exception { - Settings leaderIndexSettings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) - .build(); + Settings leaderIndexSettings = indexSettings(1, 0).build(); createLeaderIndex("logs-201812", leaderIndexSettings); @@ -139,10 +136,7 @@ public void testAutoFollowDoNotFollowSystemIndices() throws 
Exception { // Trigger system index creation leaderClient().prepareIndex(FakeSystemIndex.SYSTEM_INDEX_NAME).setSource(Map.of("a", "b")).get(); - Settings leaderIndexSettings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) - .build(); + Settings leaderIndexSettings = indexSettings(1, 0).build(); createLeaderIndex("logs-201901", leaderIndexSettings); assertLongBusy(() -> { AutoFollowStats autoFollowStats = getAutoFollowStats(); @@ -153,10 +147,7 @@ public void testAutoFollowDoNotFollowSystemIndices() throws Exception { } public void testCleanFollowedLeaderIndexUUIDs() throws Exception { - Settings leaderIndexSettings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) - .build(); + Settings leaderIndexSettings = indexSettings(1, 0).build(); putAutoFollowPatterns("my-pattern", new String[] { "logs-*" }); createLeaderIndex("logs-201901", leaderIndexSettings); @@ -192,10 +183,7 @@ public void testCleanFollowedLeaderIndexUUIDs() throws Exception { } public void testAutoFollowManyIndices() throws Exception { - Settings leaderIndexSettings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) - .build(); + Settings leaderIndexSettings = indexSettings(1, 0).build(); putAutoFollowPatterns("my-pattern", new String[] { "logs-*" }); long numIndices = randomIntBetween(4, 8); @@ -267,10 +255,7 @@ public void testAutoFollowManyIndices() throws Exception { } public void testAutoFollowParameterAreDelegated() throws Exception { - Settings leaderIndexSettings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) - .build(); + Settings leaderIndexSettings = indexSettings(1, 0).build(); // Enabling auto following: PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT); @@ -377,10 +362,7 @@ public void testAutoFollowParameterAreDelegated() throws Exception { } public void testConflictingPatterns() throws Exception { - Settings leaderIndexSettings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) - .build(); + Settings leaderIndexSettings = indexSettings(1, 0).build(); // Enabling auto following: putAutoFollowPatterns("my-pattern1", new String[] { "logs-*" }); @@ -422,10 +404,7 @@ public void testConflictingPatterns() throws Exception { } public void testPauseAndResumeAutoFollowPattern() throws Exception { - final Settings leaderIndexSettings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) - .build(); + final Settings leaderIndexSettings = indexSettings(1, 0).build(); // index created in the remote cluster before the auto follow pattern exists won't be auto followed createLeaderIndex("test-existing-index-is-ignored", leaderIndexSettings); @@ -504,10 +483,7 @@ public void testPauseAndResumeAutoFollowPattern() throws Exception { } public void testPauseAndResumeWithMultipleAutoFollowPatterns() throws Exception { - final Settings leaderIndexSettings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - 
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) - .build(); + final Settings leaderIndexSettings = indexSettings(1, 0).build(); final String[] prefixes = { "logs-", "users-", "docs-", "monitoring-", "data-", "system-", "events-", "files-" }; @@ -609,10 +585,7 @@ public void testPauseAndResumeWithMultipleAutoFollowPatterns() throws Exception } public void testAutoFollowExclusion() throws Exception { - Settings leaderIndexSettings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) - .build(); + Settings leaderIndexSettings = indexSettings(1, 0).build(); putAutoFollowPatterns("my-pattern1", new String[] { "logs-*" }, Collections.singletonList("logs-2018*")); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitUntilTimeSeriesEndTimePassesStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitUntilTimeSeriesEndTimePassesStepTests.java index fae437c1d2fc9..50ed7ddcc3f33 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitUntilTimeSeriesEndTimePassesStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitUntilTimeSeriesEndTimePassesStepTests.java @@ -12,7 +12,6 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexVersion; @@ -124,13 +123,7 @@ public void onFailure(Exception e) { { // regular indices (non-ts) meet the step condition IndexMetadata indexMeta = IndexMetadata.builder(randomAlphaOfLengthBetween(10, 30)) - .settings( - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) - .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current()) - .build() - ) + .settings(indexSettings(1, 1).put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current()).build()) .build(); Metadata newMetadata = Metadata.builder(clusterState.metadata()).put(indexMeta, true).build(); diff --git a/x-pack/plugin/eql/qa/mixed-node/src/javaRestTest/java/org/elasticsearch/xpack/eql/qa/mixed_node/EqlSearchIT.java b/x-pack/plugin/eql/qa/mixed-node/src/javaRestTest/java/org/elasticsearch/xpack/eql/qa/mixed_node/EqlSearchIT.java index d8b887b98e647..2a29572374fa8 100644 --- a/x-pack/plugin/eql/qa/mixed-node/src/javaRestTest/java/org/elasticsearch/xpack/eql/qa/mixed_node/EqlSearchIT.java +++ b/x-pack/plugin/eql/qa/mixed-node/src/javaRestTest/java/org/elasticsearch/xpack/eql/qa/mixed_node/EqlSearchIT.java @@ -11,8 +11,6 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestClient; -import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.test.NotEqualMessageBuilder; import org.elasticsearch.test.rest.ESRestTestCase; @@ -68,14 +66,7 @@ public void createIndex() throws IOException { bwcNodes = new ArrayList<>(nodes.getBWCNodes()); String mappings = readResource(EqlSearchIT.class.getResourceAsStream("/eql_mapping.json")); - createIndex( - index, - Settings.builder() - 
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), numShards) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) - .build(), - mappings - ); + createIndex(index, indexSettings(numShards, numReplicas).build(), mappings); } @After diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractPausableIntegTestCase.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractPausableIntegTestCase.java index 7a5072120e5af..8de65847c3f85 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractPausableIntegTestCase.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractPausableIntegTestCase.java @@ -82,12 +82,7 @@ public void setupIndex() throws IOException { mapping.endObject(); } mapping.endObject(); - client().admin() - .indices() - .prepareCreate("test") - .setSettings(Map.of("number_of_shards", 1, "number_of_replicas", 0)) - .setMapping(mapping.endObject()) - .get(); + client().admin().indices().prepareCreate("test").setSettings(indexSettings(1, 0)).setMapping(mapping.endObject()).get(); BulkRequestBuilder bulk = client().prepareBulk().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); for (int i = 0; i < numberOfDocs(); i++) { diff --git a/x-pack/plugin/ml/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/transforms/PainlessDomainSplitIT.java b/x-pack/plugin/ml/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/transforms/PainlessDomainSplitIT.java index cbdad9e86f4f8..321f34f81f16a 100644 --- a/x-pack/plugin/ml/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/transforms/PainlessDomainSplitIT.java +++ b/x-pack/plugin/ml/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/transforms/PainlessDomainSplitIT.java @@ -9,7 +9,6 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; -import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.rest.ESRestTestCase; @@ -186,9 +185,7 @@ static class TestConfiguration { } public void testIsolated() throws Exception { - Settings.Builder settings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0); + Settings.Builder settings = indexSettings(1, 0); createIndex("painless", settings.build()); Request createDoc = new Request("PUT", "/painless/_doc/1"); @@ -282,9 +279,7 @@ public void testHRDSplit() throws Exception { client().performRequest(new Request("POST", BASE_PATH + "anomaly_detectors/hrd-split-job/_open")); // Create index to hold data - Settings.Builder settings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0); + Settings.Builder settings = indexSettings(1, 0); createIndex("painless", settings.build(), """ "properties": { "domain": { "type": "keyword" },"time": { "type": "date" } }"""); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsResizeIntegTests.java 
b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsResizeIntegTests.java
index 0811ee86b3c32..0a0cce5dd87ed 100644
--- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsResizeIntegTests.java
+++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsResizeIntegTests.java
@@ -20,7 +20,6 @@
 import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING;
 import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING;
-import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING;
 import static org.elasticsearch.index.IndexSettings.INDEX_SOFT_DELETES_SETTING;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.xpack.core.searchablesnapshots.MountSearchableSnapshotRequest.Storage;
@@ -37,11 +36,7 @@ public void setUp() throws Exception {
         assertAcked(
             prepareCreate(
                 "index",
-                Settings.builder()
-                    .put(INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
-                    .put(INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 2)
-                    .put(INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.getKey(), 4)
-                    .put(INDEX_SOFT_DELETES_SETTING.getKey(), true)
+                indexSettings(2, 0).put(INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.getKey(), 4).put(INDEX_SOFT_DELETES_SETTING.getKey(), true)
             )
         );
         indexRandomDocs("index", scaledRandomIntBetween(0, 1_000));
diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/SnapshotUserRoleIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/SnapshotUserRoleIntegTests.java
index d04c2a4b0c578..3c639471f80b5 100644
--- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/SnapshotUserRoleIntegTests.java
+++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/SnapshotUserRoleIntegTests.java
@@ -54,7 +54,7 @@ public void setupClusterBeforeSnapshot() throws IOException {
         logger.info("--> creating ordinary index");
         final int shards = between(1, 10);
         ordinaryIndex = randomAlphaOfLength(4).toLowerCase(Locale.ROOT);
-        assertAcked(prepareCreate(ordinaryIndex, 0, Settings.builder().put("number_of_shards", shards).put("number_of_replicas", 0)));
+        assertAcked(prepareCreate(ordinaryIndex, 0, indexSettings(shards, 0)));
         ensureGreen();
 
         logger.info("--> creating snapshot_user user");
diff --git a/x-pack/plugin/sql/qa/mixed-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/mixed_node/SqlSearchIT.java b/x-pack/plugin/sql/qa/mixed-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/mixed_node/SqlSearchIT.java
index 41ebd6adffd41..f05eccb737ca2 100644
--- a/x-pack/plugin/sql/qa/mixed-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/mixed_node/SqlSearchIT.java
+++ b/x-pack/plugin/sql/qa/mixed-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/mixed_node/SqlSearchIT.java
@@ -13,8 +13,6 @@
 import org.elasticsearch.client.Request;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.client.RestClient;
-import org.elasticsearch.cluster.metadata.IndexMetadata;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.test.NotEqualMessageBuilder;
 import org.elasticsearch.test.rest.ESRestTestCase;
@@ -63,14 +61,7 @@ public void createIndex() throws IOException {
         bwcNodes = new ArrayList<>(nodes.getBWCNodes());
         String mappings = readResource(SqlSearchIT.class.getResourceAsStream("/all_field_types.json"));
-        createIndex(
-            index,
-            Settings.builder()
-                .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), numShards)
-                .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas)
-                .build(),
-            mappings
-        );
+        createIndex(index, indexSettings(numShards, numReplicas).build(), mappings);
     }
 
     @After
diff --git a/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformAuditorIT.java b/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformAuditorIT.java
index b0d56f47cbec6..477ce2127ed42 100644
--- a/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformAuditorIT.java
+++ b/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformAuditorIT.java
@@ -9,7 +9,6 @@
 import org.elasticsearch.client.Request;
 import org.elasticsearch.client.RequestOptions;
-import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.xpack.core.transform.transforms.persistence.TransformInternalIndexConstants;
@@ -95,9 +94,7 @@ public void testAuditorWritesAudits() throws Exception {
     }
 
     public void testAliasCreatedforBWCIndexes() throws Exception {
-        Settings.Builder settings = Settings.builder()
-            .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-            .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0);
+        Settings.Builder settings = indexSettings(1, 0);
 
         // These indices should only exist if created in previous versions, ignore the deprecation warning for this test
         RequestOptions options = expectWarnings(
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchStoreUtilsTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchStoreUtilsTests.java
index ffceb054ae54d..a04eb3e7d5091 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchStoreUtilsTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchStoreUtilsTests.java
@@ -133,10 +133,7 @@ public void testGetConcreteIndexForConcreteIndex() {
     private IndexMetadata createIndexMetaData(String indexName, AliasMetadata aliasMetadata) {
         IndexMetadata.Builder indexMetadataBuilder = new IndexMetadata.Builder(indexName);
-        Settings settings = Settings.builder()
-            .put(IndexMetadata.SETTING_PRIORITY, 5)
-            .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-            .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
+        Settings settings = indexSettings(1, 1).put(IndexMetadata.SETTING_PRIORITY, 5)
             .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current())
             .build();
         indexMetadataBuilder.settings(settings);

From 7500484646e1544d8c7d849319f0983d45b83cf2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lorenzo=20Dematt=C3=A9?=
Date: Mon, 5 Aug 2024 10:30:30 +0200
Subject: [PATCH 33/36] Add one missing test case (#111553)

---
 .../SystemIndexMappingUpdateServiceTests.java | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/server/src/test/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceTests.java b/server/src/test/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceTests.java
index 108ce6b63cec8..d805f8528897e 100644
--- a/server/src/test/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceTests.java
@@ -208,8 +208,18 @@ public void testManagerSkipsIndicesWithUpToDateMappings() {
         );
     }
 
-    // TODO[wrb]: add test where we have the old mappings version but not the new one
-    // Is this where we "placeholder" a "distant future" version string?
+    /**
+     * Check that the manager will try to upgrade indices when we have the old mappings version but not the new one
+     */
+    public void testManagerProcessesIndicesWithOldMappingsVersion() {
+        assertThat(
+            SystemIndexMappingUpdateService.getUpgradeStatus(
+                markShardsAvailable(createClusterState(Strings.toString(getMappings("1.0.0", null)))),
+                DESCRIPTOR
+            ),
+            equalTo(UpgradeStatus.NEEDS_MAPPINGS_UPDATE)
+        );
+    }
 
     /**
      * Check that the manager will try to upgrade indices where their mappings are out-of-date.

From 76878f156fc31585ce6a4d0c552c3368c52c361f Mon Sep 17 00:00:00 2001
From: Liam Thompson <32779855+leemthompo@users.noreply.github.com>
Date: Mon, 5 Aug 2024 09:39:42 +0100
Subject: [PATCH 34/36] [DOCS] Replace local dev instructions in main README (#111352)

---
 README.asciidoc | 158 ++++++++++++++++++++++++++++++++++--------
 1 file changed, 113 insertions(+), 45 deletions(-)

diff --git a/README.asciidoc b/README.asciidoc
index fa479d9c76340..c1945e56b025b 100644
--- a/README.asciidoc
+++ b/README.asciidoc
@@ -33,76 +33,144 @@ https://www.elastic.co/downloads/elasticsearch[elastic.co/downloads/elasticsearc
 === Run Elasticsearch locally
 
 ////
-IMPORTANT: This content is replicated in the Elasticsearch guide.
-If you make changes, you must also update setup/set-up-local-dev-deployment.asciidoc.
+IMPORTANT: This content is replicated in the Elasticsearch guide. See `run-elasticsearch-locally.asciidoc`.
+Both will soon be replaced by a quickstart script.
 ////
 
-To try out Elasticsearch on your own machine, we recommend using Docker
-and running both Elasticsearch and Kibana.
-Docker images are available from the https://www.docker.elastic.co[Elastic Docker registry].
+[WARNING]
+====
+DO NOT USE THESE INSTRUCTIONS FOR PRODUCTION DEPLOYMENTS.
 
-NOTE: Starting in Elasticsearch 8.0, security is enabled by default.
-The first time you start Elasticsearch, TLS encryption is configured automatically,
-a password is generated for the `elastic` user,
-and a Kibana enrollment token is created so you can connect Kibana to your secured cluster.
+This setup is intended for local development and testing only.
+====
 
-For other installation options, see the
-https://www.elastic.co/guide/en/elasticsearch/reference/current/install-elasticsearch.html[Elasticsearch installation documentation].
+The following commands help you very quickly spin up a single-node Elasticsearch cluster, together with Kibana in Docker.
+Use this setup for local development or testing.
 
-**Start Elasticsearch**
+==== Prerequisites
 
-. Install and start https://www.docker.com/products/docker-desktop[Docker
-Desktop]. Go to **Preferences > Resources > Advanced** and set Memory to at least 4GB.
+If you don't have Docker installed, https://www.docker.com/products/docker-desktop[download and install Docker Desktop] for your operating system.
 
-. Start an Elasticsearch container:
-+
+==== Set environment variables
+
+Configure the following environment variables.
+
+[source,sh]
+----
+export ELASTIC_PASSWORD="" # password for "elastic" username
+export KIBANA_PASSWORD="" # Used internally by Kibana, must be at least 6 characters long
+----
+
+==== Create a Docker network
+
+To run both Elasticsearch and Kibana, you'll need to create a Docker network:
+
+[source,sh]
 ----
-docker network create elastic
-docker pull docker.elastic.co/elasticsearch/elasticsearch:{version} <1>
-docker run --name elasticsearch --net elastic -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" -t docker.elastic.co/elasticsearch/elasticsearch:{version}
+docker network create elastic-net
 ----
-<1> Replace {version} with the version of Elasticsearch you want to run.
-+
-When you start Elasticsearch for the first time, the generated `elastic` user password and
-Kibana enrollment token are output to the terminal.
-+
-NOTE: You might need to scroll back a bit in the terminal to view the password
-and enrollment token.
 
-. Copy the generated password and enrollment token and save them in a secure
-location. These values are shown only when you start Elasticsearch for the first time.
-You'll use these to enroll Kibana with your Elasticsearch cluster and log in.
+==== Run Elasticsearch
+
+Start the Elasticsearch container with the following command:
 
-**Start Kibana**
+[source,sh]
+----
+docker run -p 127.0.0.1:9200:9200 -d --name elasticsearch --network elastic-net \
+  -e ELASTIC_PASSWORD=$ELASTIC_PASSWORD \
+  -e "discovery.type=single-node" \
+  -e "xpack.security.http.ssl.enabled=false" \
+  -e "xpack.license.self_generated.type=trial" \
+  docker.elastic.co/elasticsearch/elasticsearch:{version}
+----
 
-Kibana enables you to easily send requests to Elasticsearch and analyze, visualize, and manage data interactively.
+==== Run Kibana (optional)
 
-. In a new terminal session, start Kibana and connect it to your Elasticsearch container:
-+
+To run Kibana, you must first set the `kibana_system` password in the Elasticsearch container.
+
+[source,sh]
 ----
-docker pull docker.elastic.co/kibana/kibana:{version} <1>
-docker run --name kibana --net elastic -p 5601:5601 docker.elastic.co/kibana/kibana:{version}
+# configure the Kibana password in the ES container
+curl -u elastic:$ELASTIC_PASSWORD \
+  -X POST \
+  http://localhost:9200/_security/user/kibana_system/_password \
+  -d '{"password":"'"$KIBANA_PASSWORD"'"}' \
+  -H 'Content-Type: application/json'
 ----
-<1> Replace {version} with the version of Kibana you want to run.
-+
-When you start Kibana, a unique URL is output to your terminal.
+// NOTCONSOLE
 
-. To access Kibana, open the generated URL in your browser.
+Start the Kibana container with the following command:
 
- .. Paste the enrollment token that you copied when starting
- Elasticsearch and click the button to connect your Kibana instance with Elasticsearch.
+[source,sh]
+----
+docker run -p 127.0.0.1:5601:5601 -d --name kibana --network elastic-net \
+  -e ELASTICSEARCH_URL=http://elasticsearch:9200 \
+  -e ELASTICSEARCH_HOSTS=http://elasticsearch:9200 \
+  -e ELASTICSEARCH_USERNAME=kibana_system \
+  -e ELASTICSEARCH_PASSWORD=$KIBANA_PASSWORD \
+  -e "xpack.security.enabled=false" \
+  -e "xpack.license.self_generated.type=trial" \
+  docker.elastic.co/kibana/kibana:{version}
+----
 
- .. Log in to Kibana as the `elastic` user with the password that was generated
- when you started Elasticsearch.
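+
+As a quick sanity check, you can confirm that both containers are reachable. This is a minimal sketch rather than part of the official instructions: it assumes the `ELASTIC_PASSWORD` variable set earlier and uses the stock `_cluster/health` and Kibana `/api/status` endpoints.
+
+[source,sh]
+----
+# ask Elasticsearch for cluster health as the "elastic" user
+curl -u elastic:$ELASTIC_PASSWORD "http://localhost:9200/_cluster/health?pretty"
+
+# ask Kibana for its status (ready once it reports available)
+curl -s http://localhost:5601/api/status
+----
+// NOTCONSOLE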
+.Trial license
+[%collapsible]
+====
+The service is started with a trial license. The trial license enables all features of Elasticsearch for a trial period of 30 days. After the trial period expires, the license is downgraded to a basic license, which is free forever. If you prefer to skip the trial and use the basic license, set the value of the `xpack.license.self_generated.type` variable to `basic` instead. For a detailed feature comparison between the different licenses, refer to our https://www.elastic.co/subscriptions[subscriptions page].
+====
 
-**Send requests to Elasticsearch**
+==== Send requests to Elasticsearch
 
 You send data and other requests to Elasticsearch through REST APIs.
 You can interact with Elasticsearch using any client that sends HTTP requests,
 such as the https://www.elastic.co/guide/en/elasticsearch/client/index.html[Elasticsearch language clients] and https://curl.se[curl].
+
+===== Using curl
+
+Here's an example curl command to create a new Elasticsearch index, using basic auth:
+
+[source,sh]
+----
+curl -u elastic:$ELASTIC_PASSWORD \
+  -X PUT \
+  http://localhost:9200/my-new-index \
+  -H 'Content-Type: application/json'
+----
+// NOTCONSOLE
+
+===== Using a language client
+
+To connect to your local dev Elasticsearch cluster with a language client, you can use basic authentication with the `elastic` username and the password you set in the environment variable.
+
+You'll use the following connection details:
+
+* **Elasticsearch endpoint**: `http://localhost:9200`
+* **Username**: `elastic`
+* **Password**: `$ELASTIC_PASSWORD` (Value you set in the environment variable)
+
+For example, to connect with the Python `elasticsearch` client:
+
+[source,python]
+----
+import os
+from elasticsearch import Elasticsearch
+
+username = 'elastic'
+password = os.getenv('ELASTIC_PASSWORD') # Value you set in the environment variable
+
+client = Elasticsearch(
+    "http://localhost:9200",
+    basic_auth=(username, password)
+)
+
+print(client.info())
+----
+
+===== Using the Dev Tools Console
+
 Kibana's developer console provides an easy way to experiment and test requests.
-To access the console, go to **Management > Dev Tools**.
+To access the console, open Kibana, then go to **Management** > **Dev Tools**.
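+
+If you prefer to stay on the command line, the same requests can be sent with curl. As a rough sketch that reuses the `my-new-index` index and the `ELASTIC_PASSWORD` variable from the curl example above, a match-all search looks like this:
+
+[source,sh]
+----
+# run a match_all query against the index created earlier
+curl -u elastic:$ELASTIC_PASSWORD \
+  -X GET \
+  "http://localhost:9200/my-new-index/_search?pretty" \
+  -H 'Content-Type: application/json' \
+  -d '{"query": {"match_all": {}}}'
+----
+// NOTCONSOLE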
 **Add data**

From 17f819269ae6644658697f8bd81412f360cea26d Mon Sep 17 00:00:00 2001
From: Simon Cooper
Date: Mon, 5 Aug 2024 10:31:09 +0100
Subject: [PATCH 35/36] Check the scale before converting xcontent long
 values, rather than the absolute value (#111538)

Large numbers are rejected, small numbers rounded to zero (if rounding enabled)

---
 .../support/AbstractXContentParser.java       | 23 +++++++----
 .../xcontent/XContentParserTests.java         | 39 +++++++++++++++++++
 2 files changed, 54 insertions(+), 8 deletions(-)

diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/support/AbstractXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/support/AbstractXContentParser.java
index be100e1a6d120..9672c73ef56df 100644
--- a/libs/x-content/src/main/java/org/elasticsearch/xcontent/support/AbstractXContentParser.java
+++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/support/AbstractXContentParser.java
@@ -151,11 +151,8 @@ public int intValue(boolean coerce) throws IOException {
 
     protected abstract int doIntValue() throws IOException;
 
-    private static BigInteger LONG_MAX_VALUE_AS_BIGINTEGER = BigInteger.valueOf(Long.MAX_VALUE);
-    private static BigInteger LONG_MIN_VALUE_AS_BIGINTEGER = BigInteger.valueOf(Long.MIN_VALUE);
-    // weak bounds on the BigDecimal representation to allow for coercion
-    private static BigDecimal BIGDECIMAL_GREATER_THAN_LONG_MAX_VALUE = BigDecimal.valueOf(Long.MAX_VALUE).add(BigDecimal.ONE);
-    private static BigDecimal BIGDECIMAL_LESS_THAN_LONG_MIN_VALUE = BigDecimal.valueOf(Long.MIN_VALUE).subtract(BigDecimal.ONE);
+    private static final BigInteger LONG_MAX_VALUE_AS_BIGINTEGER = BigInteger.valueOf(Long.MAX_VALUE);
+    private static final BigInteger LONG_MIN_VALUE_AS_BIGINTEGER = BigInteger.valueOf(Long.MIN_VALUE);
 
     /** Return the long that {@code stringValue} stores or throws an exception if the
      *  stored value cannot be converted to a long that stores the exact same
@@ -170,11 +167,21 @@ private static long toLong(String stringValue, boolean coerce) {
         final BigInteger bigIntegerValue;
         try {
             final BigDecimal bigDecimalValue = new BigDecimal(stringValue);
-            if (bigDecimalValue.compareTo(BIGDECIMAL_GREATER_THAN_LONG_MAX_VALUE) >= 0
-                || bigDecimalValue.compareTo(BIGDECIMAL_LESS_THAN_LONG_MIN_VALUE) <= 0) {
+            // long can have a maximum of 19 digits - any more than that cannot be a long
+            // the scale is stored as the negation, so negative scale -> big number
+            if (bigDecimalValue.scale() < -19) {
                 throw new IllegalArgumentException("Value [" + stringValue + "] is out of range for a long");
             }
-            bigIntegerValue = coerce ? bigDecimalValue.toBigInteger() : bigDecimalValue.toBigIntegerExact();
+            // large scale -> very small number
+            if (bigDecimalValue.scale() > 19) {
+                if (coerce) {
+                    bigIntegerValue = BigInteger.ZERO;
+                } else {
+                    throw new ArithmeticException("Number has a decimal part");
+                }
+            } else {
+                bigIntegerValue = coerce ? bigDecimalValue.toBigInteger() : bigDecimalValue.toBigIntegerExact();
+            }
         } catch (ArithmeticException e) {
             throw new IllegalArgumentException("Value [" + stringValue + "] has a decimal part");
         } catch (NumberFormatException e) {
diff --git a/libs/x-content/src/test/java/org/elasticsearch/xcontent/XContentParserTests.java b/libs/x-content/src/test/java/org/elasticsearch/xcontent/XContentParserTests.java
index c8df9929d007b..b9cb7df84a8e4 100644
--- a/libs/x-content/src/test/java/org/elasticsearch/xcontent/XContentParserTests.java
+++ b/libs/x-content/src/test/java/org/elasticsearch/xcontent/XContentParserTests.java
@@ -31,6 +31,7 @@
 import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.in;
 import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.nullValue;
 import static org.junit.internal.matchers.ThrowableMessageMatcher.hasMessage;
 
@@ -74,6 +75,44 @@ public void testFloat() throws IOException {
         }
     }
 
+    public void testLongCoercion() throws IOException {
+        XContentType xContentType = randomFrom(XContentType.values());
+
+        try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) {
+            builder.startObject();
+            builder.field("decimal", "5.5");
+            builder.field("expInRange", "5e18");
+            builder.field("expTooBig", "2e100");
+            builder.field("expTooSmall", "2e-100");
+            builder.endObject();
+
+            try (XContentParser parser = createParser(xContentType.xContent(), BytesReference.bytes(builder))) {
+                assertThat(parser.nextToken(), is(XContentParser.Token.START_OBJECT));
+
+                assertThat(parser.nextToken(), is(XContentParser.Token.FIELD_NAME));
+                assertThat(parser.currentName(), is("decimal"));
+                assertThat(parser.nextToken(), is(XContentParser.Token.VALUE_STRING));
+                assertThat(parser.longValue(), equalTo(5L));
+
+                assertThat(parser.nextToken(), is(XContentParser.Token.FIELD_NAME));
+                assertThat(parser.currentName(), is("expInRange"));
+                assertThat(parser.nextToken(), is(XContentParser.Token.VALUE_STRING));
+                assertThat(parser.longValue(), equalTo((long) 5e18));
+
+                assertThat(parser.nextToken(), is(XContentParser.Token.FIELD_NAME));
+                assertThat(parser.currentName(), is("expTooBig"));
+                assertThat(parser.nextToken(), is(XContentParser.Token.VALUE_STRING));
+                expectThrows(IllegalArgumentException.class, parser::longValue);
+
+                // too small goes to zero
+                assertThat(parser.nextToken(), is(XContentParser.Token.FIELD_NAME));
+                assertThat(parser.currentName(), is("expTooSmall"));
+                assertThat(parser.nextToken(), is(XContentParser.Token.VALUE_STRING));
+                assertThat(parser.longValue(), equalTo(0L));
+            }
+        }
+    }
+
     public void testReadList() throws IOException {
         assertThat(readList("{\"foo\": [\"bar\"]}"), contains("bar"));
         assertThat(readList("{\"foo\": [\"bar\",\"baz\"]}"), contains("bar", "baz"));

From f352418ba7a0c71d12f93d7e4445184676db4b45 Mon Sep 17 00:00:00 2001
From: Albert Zaharovits
Date: Mon, 5 Aug 2024 12:34:09 +0300
Subject: [PATCH 36/36] Enforce JKS truststore for Azure IT (#111569)

Closes #111279
Closes #111345
Closes #111280
Closes #111307
Closes #111377

---
 .../org/elasticsearch/common/ssl/KeyStoreUtil.java  | 12 ++++++++++--
 .../azure/RepositoryAzureClientYamlTestSuiteIT.java | 13 +++++--------
 muted-tests.yml                                     | 12 ------------
 .../java/org/elasticsearch/test/TestTrustStore.java |  4 ++--
 .../metering/azure/AzureRepositoriesMeteringIT.java | 13 +++++--------
 .../AzureSearchableSnapshotsIT.java                 |  9 +++++----
 .../recovery/AzureSnapshotBasedRecoveryIT.java      | 13 +++++--------
 .../testkit/AzureSnapshotRepoTestKitIT.java         | 13 +++++--------
 8 files changed, 37 insertions(+), 52 deletions(-)

diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/KeyStoreUtil.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/KeyStoreUtil.java
index aebee89297a88..7f5b005e28470 100644
--- a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/KeyStoreUtil.java
+++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/KeyStoreUtil.java
@@ -106,8 +106,12 @@ public static KeyStore filter(KeyStore store, Predicate filter) {
      * @param certificates The root certificates to trust
      */
     public static KeyStore buildTrustStore(Iterable<Certificate> certificates) throws GeneralSecurityException {
+        return buildTrustStore(certificates, KeyStore.getDefaultType());
+    }
+
+    public static KeyStore buildTrustStore(Iterable<Certificate> certificates, String type) throws GeneralSecurityException {
         assert certificates != null : "Cannot create keystore with null certificates";
-        KeyStore store = buildNewKeyStore();
+        KeyStore store = buildNewKeyStore(type);
         int counter = 0;
         for (Certificate certificate : certificates) {
             store.setCertificateEntry("cert-" + counter, certificate);
@@ -117,7 +121,11 @@ public static KeyStore buildTrustStore(Iterable<Certificate> certificates) throw
     }
 
     private static KeyStore buildNewKeyStore() throws GeneralSecurityException {
-        KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType());
+        return buildNewKeyStore(KeyStore.getDefaultType());
+    }
+
+    private static KeyStore buildNewKeyStore(String type) throws GeneralSecurityException {
+        KeyStore keyStore = KeyStore.getInstance(type);
         try {
             keyStore.load(null, null);
         } catch (IOException e) {
diff --git a/modules/repository-azure/src/yamlRestTest/java/org/elasticsearch/repositories/azure/RepositoryAzureClientYamlTestSuiteIT.java b/modules/repository-azure/src/yamlRestTest/java/org/elasticsearch/repositories/azure/RepositoryAzureClientYamlTestSuiteIT.java
index c40a0fb4da4b1..a152e1fdf5ecc 100644
--- a/modules/repository-azure/src/yamlRestTest/java/org/elasticsearch/repositories/azure/RepositoryAzureClientYamlTestSuiteIT.java
+++ b/modules/repository-azure/src/yamlRestTest/java/org/elasticsearch/repositories/azure/RepositoryAzureClientYamlTestSuiteIT.java
@@ -26,15 +26,15 @@
 public class RepositoryAzureClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
 
     private static final boolean USE_FIXTURE = Booleans.parseBoolean(System.getProperty("test.azure.fixture", "true"));
+    private static final boolean USE_HTTPS_FIXTURE = USE_FIXTURE && ESTestCase.inFipsJvm() == false;
+
     private static final String AZURE_TEST_ACCOUNT = System.getProperty("test.azure.account");
     private static final String AZURE_TEST_CONTAINER = System.getProperty("test.azure.container");
     private static final String AZURE_TEST_KEY = System.getProperty("test.azure.key");
    private static final String AZURE_TEST_SASTOKEN = System.getProperty("test.azure.sas_token");
 
     private static AzureHttpFixture fixture = new AzureHttpFixture(
-        USE_FIXTURE
-            ? ESTestCase.inFipsJvm() ? AzureHttpFixture.Protocol.HTTP : AzureHttpFixture.Protocol.HTTPS
-            : AzureHttpFixture.Protocol.NONE,
+        USE_HTTPS_FIXTURE ? AzureHttpFixture.Protocol.HTTPS : USE_FIXTURE ? AzureHttpFixture.Protocol.HTTP : AzureHttpFixture.Protocol.NONE,
         AZURE_TEST_ACCOUNT,
         AZURE_TEST_CONTAINER,
         Strings.hasText(AZURE_TEST_KEY) || Strings.hasText(AZURE_TEST_SASTOKEN)
@@ -66,11 +66,8 @@ public class RepositoryAzureClientYamlTestSuiteIT extends ESClientYamlSuiteTestC
         )
         .systemProperty("AZURE_POD_IDENTITY_AUTHORITY_HOST", () -> fixture.getMetadataAddress(), s -> USE_FIXTURE)
         .setting("thread_pool.repository_azure.max", () -> String.valueOf(randomIntBetween(1, 10)), s -> USE_FIXTURE)
-        .systemProperty(
-            "javax.net.ssl.trustStore",
-            () -> trustStore.getTrustStorePath().toString(),
-            s -> USE_FIXTURE && ESTestCase.inFipsJvm() == false
-        )
+        .systemProperty("javax.net.ssl.trustStore", () -> trustStore.getTrustStorePath().toString(), s -> USE_HTTPS_FIXTURE)
+        .systemProperty("javax.net.ssl.trustStoreType", () -> "jks", s -> USE_HTTPS_FIXTURE)
         .build();
 
     @ClassRule(order = 1)
diff --git a/muted-tests.yml b/muted-tests.yml
index 98a9a52f85a08..96fc68cdc3b8a 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -114,21 +114,9 @@ tests:
 - class: org.elasticsearch.xpack.security.authc.oidc.OpenIdConnectAuthIT
   method: testAuthenticateWithCodeFlowAndClientPost
   issue: https://github.com/elastic/elasticsearch/issues/111396
-- class: org.elasticsearch.xpack.searchablesnapshots.AzureSearchableSnapshotsIT
-  issue: https://github.com/elastic/elasticsearch/issues/111279
-- class: org.elasticsearch.repositories.azure.RepositoryAzureClientYamlTestSuiteIT
-  issue: https://github.com/elastic/elasticsearch/issues/111345
-- class: org.elasticsearch.repositories.blobstore.testkit.AzureSnapshotRepoTestKitIT
-  method: testRepositoryAnalysis
-  issue: https://github.com/elastic/elasticsearch/issues/111280
-- class: org.elasticsearch.xpack.repositories.metering.azure.AzureRepositoriesMeteringIT
-  issue: https://github.com/elastic/elasticsearch/issues/111307
 - class: org.elasticsearch.xpack.restart.FullClusterRestartIT
   method: testSingleDoc {cluster=UPGRADED}
   issue: https://github.com/elastic/elasticsearch/issues/111434
-- class: org.elasticsearch.xpack.snapshotbasedrecoveries.recovery.AzureSnapshotBasedRecoveryIT
-  method: testRecoveryUsingSnapshots
-  issue: https://github.com/elastic/elasticsearch/issues/111377
 - class: org.elasticsearch.xpack.restart.FullClusterRestartIT
   method: testDataStreams {cluster=UPGRADED}
   issue: https://github.com/elastic/elasticsearch/issues/111448
diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestTrustStore.java b/test/framework/src/main/java/org/elasticsearch/test/TestTrustStore.java
index e17a309dbc9c8..93a2a4a967592 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/TestTrustStore.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/TestTrustStore.java
@@ -50,8 +50,8 @@ protected void before() {
                 .stream()
                 .map(i -> (Certificate) i)
                 .toList();
-            final var trustStore = KeyStoreUtil.buildTrustStore(certificates);
-            trustStore.store(jksStream, null);
+            final var trustStore = KeyStoreUtil.buildTrustStore(certificates, "jks");
+            trustStore.store(jksStream, new char[0]);
             trustStorePath = tmpTrustStorePath;
         } catch (Exception e) {
             throw new AssertionError("unexpected", e);
diff --git a/x-pack/plugin/repositories-metering-api/qa/azure/src/javaRestTest/java/org/elasticsearch/xpack/repositories/metering/azure/AzureRepositoriesMeteringIT.java b/x-pack/plugin/repositories-metering-api/qa/azure/src/javaRestTest/java/org/elasticsearch/xpack/repositories/metering/azure/AzureRepositoriesMeteringIT.java
index 3a66854191088..d38060640b47d 100644
--- a/x-pack/plugin/repositories-metering-api/qa/azure/src/javaRestTest/java/org/elasticsearch/xpack/repositories/metering/azure/AzureRepositoriesMeteringIT.java
+++ b/x-pack/plugin/repositories-metering-api/qa/azure/src/javaRestTest/java/org/elasticsearch/xpack/repositories/metering/azure/AzureRepositoriesMeteringIT.java
@@ -23,15 +23,15 @@
 public class AzureRepositoriesMeteringIT extends AbstractRepositoriesMeteringAPIRestTestCase {
 
     private static final boolean USE_FIXTURE = Booleans.parseBoolean(System.getProperty("test.azure.fixture", "true"));
+    private static final boolean USE_HTTPS_FIXTURE = USE_FIXTURE && ESTestCase.inFipsJvm() == false;
+
     private static final String AZURE_TEST_ACCOUNT = System.getProperty("test.azure.account");
     private static final String AZURE_TEST_CONTAINER = System.getProperty("test.azure.container");
     private static final String AZURE_TEST_KEY = System.getProperty("test.azure.key");
     private static final String AZURE_TEST_SASTOKEN = System.getProperty("test.azure.sas_token");
 
     private static AzureHttpFixture fixture = new AzureHttpFixture(
-        USE_FIXTURE
-            ? ESTestCase.inFipsJvm() ? AzureHttpFixture.Protocol.HTTP : AzureHttpFixture.Protocol.HTTPS
-            : AzureHttpFixture.Protocol.NONE,
+        USE_HTTPS_FIXTURE ? AzureHttpFixture.Protocol.HTTPS : USE_FIXTURE ? AzureHttpFixture.Protocol.HTTP : AzureHttpFixture.Protocol.NONE,
         AZURE_TEST_ACCOUNT,
         AZURE_TEST_CONTAINER,
         AzureHttpFixture.sharedKeyForAccountPredicate(AZURE_TEST_ACCOUNT)
@@ -60,11 +60,8 @@ public class AzureRepositoriesMeteringIT extends AbstractRepositoriesMeteringAPI
             () -> "ignored;DefaultEndpointsProtocol=https;BlobEndpoint=" + fixture.getAddress(),
             s -> USE_FIXTURE
         )
-        .systemProperty(
-            "javax.net.ssl.trustStore",
-            () -> trustStore.getTrustStorePath().toString(),
-            s -> USE_FIXTURE && ESTestCase.inFipsJvm() == false
-        )
+        .systemProperty("javax.net.ssl.trustStore", () -> trustStore.getTrustStorePath().toString(), s -> USE_HTTPS_FIXTURE)
+        .systemProperty("javax.net.ssl.trustStoreType", () -> "jks", s -> USE_HTTPS_FIXTURE)
         .build();
 
     @ClassRule(order = 1)
diff --git a/x-pack/plugin/searchable-snapshots/qa/azure/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/AzureSearchableSnapshotsIT.java b/x-pack/plugin/searchable-snapshots/qa/azure/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/AzureSearchableSnapshotsIT.java
index d2cdef121fe40..68306cde1c65b 100644
--- a/x-pack/plugin/searchable-snapshots/qa/azure/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/AzureSearchableSnapshotsIT.java
+++ b/x-pack/plugin/searchable-snapshots/qa/azure/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/AzureSearchableSnapshotsIT.java
@@ -24,15 +24,15 @@
 public class AzureSearchableSnapshotsIT extends AbstractSearchableSnapshotsRestTestCase {
 
     private static final boolean USE_FIXTURE = Booleans.parseBoolean(System.getProperty("test.azure.fixture", "true"));
+    private static final boolean USE_HTTPS_FIXTURE = USE_FIXTURE && ESTestCase.inFipsJvm() == false;
+
     private static final String AZURE_TEST_ACCOUNT = System.getProperty("test.azure.account");
     private static final String AZURE_TEST_CONTAINER = System.getProperty("test.azure.container");
     private static final String AZURE_TEST_KEY = System.getProperty("test.azure.key");
     private static final String AZURE_TEST_SASTOKEN = System.getProperty("test.azure.sas_token");
 
     private static AzureHttpFixture fixture = new AzureHttpFixture(
-        USE_FIXTURE
-            ? ESTestCase.inFipsJvm() ? AzureHttpFixture.Protocol.HTTP : AzureHttpFixture.Protocol.HTTPS
-            : AzureHttpFixture.Protocol.NONE,
+        USE_HTTPS_FIXTURE ? AzureHttpFixture.Protocol.HTTPS : USE_FIXTURE ? AzureHttpFixture.Protocol.HTTP : AzureHttpFixture.Protocol.NONE,
         AZURE_TEST_ACCOUNT,
         AZURE_TEST_CONTAINER,
         AzureHttpFixture.sharedKeyForAccountPredicate(AZURE_TEST_ACCOUNT)
@@ -66,7 +66,8 @@ public class AzureSearchableSnapshotsIT extends AbstractSearchableSnapshotsRestT
         .setting("xpack.searchable.snapshot.shared_cache.size", "16MB")
         .setting("xpack.searchable.snapshot.shared_cache.region_size", "256KB")
         .setting("xpack.searchable_snapshots.cache_fetch_async_thread_pool.keep_alive", "0ms")
-        .systemProperty("javax.net.ssl.trustStore", () -> trustStore.getTrustStorePath().toString(), s -> USE_FIXTURE)
+        .systemProperty("javax.net.ssl.trustStore", () -> trustStore.getTrustStorePath().toString(), s -> USE_HTTPS_FIXTURE)
+        .systemProperty("javax.net.ssl.trustStoreType", () -> "jks", s -> USE_HTTPS_FIXTURE)
         .build();
 
     @ClassRule(order = 1)
diff --git a/x-pack/plugin/snapshot-based-recoveries/qa/azure/src/javaRestTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/AzureSnapshotBasedRecoveryIT.java b/x-pack/plugin/snapshot-based-recoveries/qa/azure/src/javaRestTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/AzureSnapshotBasedRecoveryIT.java
index bac69158a860c..feec69a0056b3 100644
--- a/x-pack/plugin/snapshot-based-recoveries/qa/azure/src/javaRestTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/AzureSnapshotBasedRecoveryIT.java
+++ b/x-pack/plugin/snapshot-based-recoveries/qa/azure/src/javaRestTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/AzureSnapshotBasedRecoveryIT.java
@@ -23,15 +23,15 @@
 public class AzureSnapshotBasedRecoveryIT extends AbstractSnapshotBasedRecoveryRestTestCase {
 
     private static final boolean USE_FIXTURE = Booleans.parseBoolean(System.getProperty("test.azure.fixture", "true"));
+    private static final boolean USE_HTTPS_FIXTURE = USE_FIXTURE && ESTestCase.inFipsJvm() == false;
+
     private static final String AZURE_TEST_ACCOUNT = System.getProperty("test.azure.account");
     private static final String AZURE_TEST_CONTAINER = System.getProperty("test.azure.container");
     private static final String AZURE_TEST_KEY = System.getProperty("test.azure.key");
     private static final String AZURE_TEST_SASTOKEN = System.getProperty("test.azure.sas_token");
 
     private static AzureHttpFixture fixture = new AzureHttpFixture(
-        USE_FIXTURE
-            ? ESTestCase.inFipsJvm() ? AzureHttpFixture.Protocol.HTTP : AzureHttpFixture.Protocol.HTTPS
-            : AzureHttpFixture.Protocol.NONE,
+        USE_HTTPS_FIXTURE ? AzureHttpFixture.Protocol.HTTPS : USE_FIXTURE ? AzureHttpFixture.Protocol.HTTP : AzureHttpFixture.Protocol.NONE,
         AZURE_TEST_ACCOUNT,
         AZURE_TEST_CONTAINER,
         AzureHttpFixture.sharedKeyForAccountPredicate(AZURE_TEST_ACCOUNT)
@@ -62,11 +62,8 @@ public class AzureSnapshotBasedRecoveryIT extends AbstractSnapshotBasedRecoveryR
             s -> USE_FIXTURE
         )
         .setting("xpack.license.self_generated.type", "trial")
-        .systemProperty(
-            "javax.net.ssl.trustStore",
-            () -> trustStore.getTrustStorePath().toString(),
-            s -> USE_FIXTURE && ESTestCase.inFipsJvm() == false
-        )
+        .systemProperty("javax.net.ssl.trustStore", () -> trustStore.getTrustStorePath().toString(), s -> USE_HTTPS_FIXTURE)
+        .systemProperty("javax.net.ssl.trustStoreType", () -> "jks", s -> USE_HTTPS_FIXTURE)
        .build();
 
     @ClassRule(order = 1)
diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/azure/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/AzureSnapshotRepoTestKitIT.java b/x-pack/plugin/snapshot-repo-test-kit/qa/azure/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/AzureSnapshotRepoTestKitIT.java
index 2f72be9de0e07..959acd2aec213 100644
--- a/x-pack/plugin/snapshot-repo-test-kit/qa/azure/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/AzureSnapshotRepoTestKitIT.java
+++ b/x-pack/plugin/snapshot-repo-test-kit/qa/azure/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/AzureSnapshotRepoTestKitIT.java
@@ -23,15 +23,15 @@
 public class AzureSnapshotRepoTestKitIT extends AbstractSnapshotRepoTestKitRestTestCase {
 
     private static final boolean USE_FIXTURE = Booleans.parseBoolean(System.getProperty("test.azure.fixture", "true"));
+    private static final boolean USE_HTTPS_FIXTURE = USE_FIXTURE && ESTestCase.inFipsJvm() == false;
+
     private static final String AZURE_TEST_ACCOUNT = System.getProperty("test.azure.account");
     private static final String AZURE_TEST_CONTAINER = System.getProperty("test.azure.container");
     private static final String AZURE_TEST_KEY = System.getProperty("test.azure.key");
     private static final String AZURE_TEST_SASTOKEN = System.getProperty("test.azure.sas_token");
 
     private static AzureHttpFixture fixture = new AzureHttpFixture(
-        USE_FIXTURE
-            ? ESTestCase.inFipsJvm() ? AzureHttpFixture.Protocol.HTTP : AzureHttpFixture.Protocol.HTTPS
-            : AzureHttpFixture.Protocol.NONE,
+        USE_HTTPS_FIXTURE ? AzureHttpFixture.Protocol.HTTPS : USE_FIXTURE ? AzureHttpFixture.Protocol.HTTP : AzureHttpFixture.Protocol.NONE,
         AZURE_TEST_ACCOUNT,
         AZURE_TEST_CONTAINER,
         Strings.hasText(AZURE_TEST_KEY) || Strings.hasText(AZURE_TEST_SASTOKEN)
@@ -69,11 +69,8 @@ public class AzureSnapshotRepoTestKitIT extends AbstractSnapshotRepoTestKitRestT
             }
         })
         .systemProperty("AZURE_POD_IDENTITY_AUTHORITY_HOST", () -> fixture.getMetadataAddress(), s -> USE_FIXTURE)
-        .systemProperty(
-            "javax.net.ssl.trustStore",
-            () -> trustStore.getTrustStorePath().toString(),
-            s -> USE_FIXTURE && ESTestCase.inFipsJvm() == false
-        )
+        .systemProperty("javax.net.ssl.trustStore", () -> trustStore.getTrustStorePath().toString(), s -> USE_HTTPS_FIXTURE)
+        .systemProperty("javax.net.ssl.trustStoreType", () -> "jks", s -> USE_HTTPS_FIXTURE)
         .build();
 
     @ClassRule(order = 1)
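
A note for anyone running these Azure fixtures locally: because the truststore is now always written as `jks` with an empty password, it can be inspected with the JDK's `keytool`. The following is a hedged sketch, not part of the patch; the path is hypothetical and stands in for wherever `TestTrustStore` writes the file.

[source,sh]
----
# list the trusted certificates in the generated JKS truststore;
# press Enter at the password prompt (the store is written with an empty password)
keytool -list -keystore /tmp/test-trust-store.jks -storetype jks
----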