diff --git a/.backportrc.json b/.backportrc.json index fb0d0d7e6f0b9..22b63a4acb050 100644 --- a/.backportrc.json +++ b/.backportrc.json @@ -1,17 +1,9 @@ { - "upstream": "elastic/elasticsearch", - "targetBranchChoices": [ - "main", - "8.3", - "8.2", - "8.1", - "8.0", - "7.17", - "6.8" - ], - "targetPRLabels": ["backport"], - "branchLabelMapping": { - "^v8.4.0$": "main", - "^v(\\d+).(\\d+).\\d+(?:-(?:alpha|beta|rc)\\d+)?$": "$1.$2" + "upstream" : "elastic/elasticsearch", + "targetBranchChoices" : [ "main", "8.4", "8.3", "8.2", "8.1", "8.0", "7.17", "6.8" ], + "targetPRLabels" : [ "backport" ], + "branchLabelMapping" : { + "^v8.4.3$" : "main", + "^v(\\d+).(\\d+).\\d+(?:-(?:alpha|beta|rc)\\d+)?$" : "$1.$2" } -} +} \ No newline at end of file diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 7cebbfd32c508..87ae821215b55 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -55,6 +55,7 @@ BWC_VERSION: - "7.17.4" - "7.17.5" - "7.17.6" + - "7.17.7" - "8.0.0" - "8.0.1" - "8.1.0" @@ -70,3 +71,6 @@ BWC_VERSION: - "8.3.2" - "8.3.3" - "8.4.0" + - "8.4.1" + - "8.4.2" + - "8.4.3" diff --git a/.ci/jobs.t/elastic+elasticsearch+multijob+platform-support-unix.yml b/.ci/jobs.t/elastic+elasticsearch+multijob+platform-support-unix.yml index 62dda744b4c9d..6aec8b04a2f30 100644 --- a/.ci/jobs.t/elastic+elasticsearch+multijob+platform-support-unix.yml +++ b/.ci/jobs.t/elastic+elasticsearch+multijob+platform-support-unix.yml @@ -14,7 +14,8 @@ name: os values: - "centos-7&&immutable" - - "amazon&&immutable" + - "amazon-2&&immutable" + - "amazon-2022&&immutable" - "debian-10&&immutable" - "debian-11&&immutable" - "opensuse-15-1&&immutable" diff --git a/.ci/jobs.t/elastic+elasticsearch+periodic+ear.yml b/.ci/jobs.t/elastic+elasticsearch+periodic+ear.yml index 7df53cb6bfb2b..47a8d4f48cc5c 100644 --- a/.ci/jobs.t/elastic+elasticsearch+periodic+ear.yml +++ b/.ci/jobs.t/elastic+elasticsearch+periodic+ear.yml @@ -1,7 +1,6 @@ --- - job: name: elastic+elasticsearch+%BRANCH%+periodic+ear - workspace: /dev/shm/elastic+elasticsearch+%BRANCH%+periodic+ear display-name: "elastic / elasticsearch # %BRANCH% - encryption at rest" description: "The Elasticsearch %BRANCH% branch encryption at rest compatibility tests.\n\n" node: packaging-large diff --git a/.ci/jobs.t/elastic+elasticsearch+periodic+release-tests.yml b/.ci/jobs.t/elastic+elasticsearch+periodic+release-tests.yml index c4050517d3918..0ce470d37b348 100644 --- a/.ci/jobs.t/elastic+elasticsearch+periodic+release-tests.yml +++ b/.ci/jobs.t/elastic+elasticsearch+periodic+release-tests.yml @@ -22,8 +22,14 @@ export BEATS_DIR=$(pwd)/distribution/docker/build/artifacts/beats mkdir -p ${BEATS_DIR} - curl -o "${BEATS_DIR}/metricbeat-${ES_VERSION}-linux-x86_64.tar.gz" https://snapshots-no-kpi.elastic.co/downloads/beats/metricbeat/metricbeat-${ES_VERSION}-SNAPSHOT-linux-x86_64.tar.gz - curl -o "${BEATS_DIR}/filebeat-${ES_VERSION}-linux-x86_64.tar.gz" https://snapshots-no-kpi.elastic.co/downloads/beats/filebeat/filebeat-${ES_VERSION}-SNAPSHOT-linux-x86_64.tar.gz + curl --fail -o "${BEATS_DIR}/metricbeat-${ES_VERSION}-linux-x86_64.tar.gz" https://artifacts-snapshot.elastic.co/beats/${ES_VERSION}-SNAPSHOT/downloads/beats/metricbeat/metricbeat-${ES_VERSION}-SNAPSHOT-linux-x86_64.tar.gz + curl --fail -o "${BEATS_DIR}/filebeat-${ES_VERSION}-linux-x86_64.tar.gz" https://artifacts-snapshot.elastic.co/beats/${ES_VERSION}-SNAPSHOT/downloads/beats/filebeat/filebeat-${ES_VERSION}-SNAPSHOT-linux-x86_64.tar.gz - $WORKSPACE/.ci/scripts/run-gradle.sh -Dbwc.checkout.align=true 
-Dbuild.snapshot=false \ + # Fetch ML artifacts + export ML_IVY_REPO=$(mktemp -d) + mkdir -p ${ML_IVY_REPO}/maven/org/elasticsearch/ml/ml-cpp/${ES_VERSION} + curl --fail -o "${ML_IVY_REPO}/maven/org/elasticsearch/ml/ml-cpp/${ES_VERSION}/ml-cpp-${ES_VERSION}-deps.zip" https://artifacts-snapshot.elastic.co/ml-cpp/${ES_VERSION}-SNAPSHOT/downloads/ml-cpp/ml-cpp-${ES_VERSION}-SNAPSHOT-deps.zip + curl --fail -o "${ML_IVY_REPO}/maven/org/elasticsearch/ml/ml-cpp/${ES_VERSION}/ml-cpp-${ES_VERSION}-nodeps.zip" https://artifacts-snapshot.elastic.co/ml-cpp/${ES_VERSION}-SNAPSHOT/downloads/ml-cpp/ml-cpp-${ES_VERSION}-SNAPSHOT-nodeps.zip + + $WORKSPACE/.ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dbuild.snapshot=false -Dbuild.ml_cpp.repo=file://${ML_IVY_REPO} \ -Dtests.jvm.argline=-Dbuild.snapshot=false -Dlicense.key=${WORKSPACE}/x-pack/license-tools/src/test/resources/public.key -Dbuild.id=deadbeef build diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+build-benchmark-part1.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+build-benchmark-part1.yml deleted file mode 100644 index 0bb880eb22815..0000000000000 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+build-benchmark-part1.yml +++ /dev/null @@ -1,50 +0,0 @@ ---- -- job: - name: "elastic+elasticsearch+pull-request+build-benchmark-part1" - display-name: "elastic / elasticsearch - pull request build benchmark part 1" - description: "Testing of Elasticsearch pull requests - build benchmark part 1" - workspace: "/dev/shm/elastic+elasticsearch+pull-request+build-bench-1" - scm: - - git: - refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" - branches: - - "${ghprbActualCommit}" - properties: - - inject: - properties-content: | - BUILD_PERFORMANCE_TEST=true - COMPOSE_HTTP_TIMEOUT=120 - JOB_BRANCH=%BRANCH% - HOME=$JENKINS_HOME - GRADLEW=./gradlew --parallel --scan --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ - GRADLEW_BAT=./gradlew.bat --parallel --scan --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ - triggers: - - github-pull-request: - org-list: - - elastic - allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/build-bench.*' - github-hooks: true - status-context: elasticsearch-ci/build-benchmark-part1 - cancel-builds-on-update: true - black-list-target-branches: - - 6.8 - excluded-regions: - - ^docs/.* - white-list-labels: - - 'build-benchmark' - builders: - - inject: - properties-file: '.ci/java-versions.properties' - properties-content: | - JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA - RUNTIME_JAVA_HOME=$HOME/.java/$ES_RUNTIME_JAVA - JAVA8_HOME=$HOME/.java/java8 - JAVA11_HOME=$HOME/.java/java11 - - shell: | - #!/usr/local/bin/runbld --redirect-stderr - $WORKSPACE/.ci/scripts/run-gradle.sh :build-tools-internal:bootstrapPerformanceTests - $WORKSPACE/.ci/scripts/install-gradle-profiler.sh - $WORKSPACE/.ci/scripts/run-gradle-profiler.sh --benchmark --scenario-file build-tools-internal/build/performanceTests/elasticsearch-build-benchmark-part1.scenarios --project-dir . 
--output-dir profile-out - mkdir $WORKSPACE/build - tar -czf $WORKSPACE/build/${BUILD_NUMBER}.tar.bz2 profile-out \ No newline at end of file diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+build-benchmark-part2.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+build-benchmark-part2.yml deleted file mode 100644 index d415d3d3d6fc3..0000000000000 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+build-benchmark-part2.yml +++ /dev/null @@ -1,50 +0,0 @@ ---- -- job: - name: "elastic+elasticsearch+pull-request+build-benchmark-part2" - display-name: "elastic / elasticsearch - pull request build benchmark part 2" - description: "Testing of Elasticsearch pull requests - build benchmark part 2" - workspace: "/dev/shm/elastic+elasticsearch+pull-request+build-bench-2" - scm: - - git: - refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" - branches: - - "${ghprbActualCommit}" - properties: - - inject: - properties-content: | - BUILD_PERFORMANCE_TEST=true - COMPOSE_HTTP_TIMEOUT=120 - JOB_BRANCH=%BRANCH% - HOME=$JENKINS_HOME - GRADLEW=./gradlew --parallel --scan --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ - GRADLEW_BAT=./gradlew.bat --parallel --scan --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ - triggers: - - github-pull-request: - org-list: - - elastic - allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/build-bench.*' - github-hooks: true - status-context: elasticsearch-ci/build-benchmark-part2 - cancel-builds-on-update: true - black-list-target-branches: - - 6.8 - excluded-regions: - - ^docs/.* - white-list-labels: - - 'build-benchmark' - builders: - - inject: - properties-file: '.ci/java-versions.properties' - properties-content: | - JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA - RUNTIME_JAVA_HOME=$HOME/.java/$ES_RUNTIME_JAVA - JAVA8_HOME=$HOME/.java/java8 - JAVA11_HOME=$HOME/.java/java11 - - shell: | - #!/usr/local/bin/runbld --redirect-stderr - $WORKSPACE/.ci/scripts/run-gradle.sh :build-tools-internal:bootstrapPerformanceTests - $WORKSPACE/.ci/scripts/install-gradle-profiler.sh - $WORKSPACE/.ci/scripts/run-gradle-profiler.sh --benchmark --scenario-file build-tools-internal/build/performanceTests/elasticsearch-build-benchmark-part2.scenarios --project-dir . 
--output-dir profile-out - mkdir $WORKSPACE/build - tar -czf $WORKSPACE/build/${BUILD_NUMBER}.tar.bz2 profile-out \ No newline at end of file diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+bwc-snapshots.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+bwc-snapshots.yml deleted file mode 100644 index 2194bd986a891..0000000000000 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+bwc-snapshots.yml +++ /dev/null @@ -1,49 +0,0 @@ ---- -- job: - name: "elastic+elasticsearch+pull-request+bwc-snapshots" - display-name: "elastic / elasticsearch - pull request bwc" - description: "Testing of Elasticsearch pull requests - bwc" - project-type: matrix - node: master - child-workspace: "/dev/shm/elastic+elasticsearch+pull-request+bwc" - scm: - - git: - refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" - branches: - - "${ghprbActualCommit}" - triggers: - - github-pull-request: - org-list: - - elastic - allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/bwc.*' - github-hooks: true - status-context: elasticsearch-ci/bwc - cancel-builds-on-update: true - excluded-regions: - - ^docs/.* - black-list-labels: - - '>test-mute' - - 'test-full-bwc' - axes: - - axis: - type: slave - name: nodes - values: - - "general-purpose" - - axis: - type: yaml - filename: ".ci/snapshotBwcVersions" - name: "BWC_VERSION" - builders: - - inject: - properties-file: '.ci/java-versions.properties' - properties-content: | - JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA - RUNTIME_JAVA_HOME=$HOME/.java/$ES_RUNTIME_JAVA - JAVA8_HOME=$HOME/.java/java8 - JAVA11_HOME=$HOME/.java/java11 - JAVA16_HOME=$HOME/.java/openjdk16 - - shell: | - #!/usr/local/bin/runbld --redirect-stderr - $WORKSPACE/.ci/scripts/run-gradle.sh -Dignore.tests.seed v$BWC_VERSION#bwcTest diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+cloud-deploy.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+cloud-deploy.yml deleted file mode 100644 index 0331046542f5c..0000000000000 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+cloud-deploy.yml +++ /dev/null @@ -1,50 +0,0 @@ ---- -- job: - name: "elastic+elasticsearch+pull-request+cloud-deploy" - display-name: "elastic / elasticsearch - pull request cloud-deploy" - description: "Testing of Elasticsearch pull requests - cloud-deploy" - workspace: "/dev/shm/elastic+elasticsearch+pull-request+cloud-deploy" - node: "general-purpose && docker" - scm: - - git: - refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" - branches: - - "${ghprbActualCommit}" - triggers: - - github-pull-request: - org-list: - - elastic - allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/cloud-deploy.*' - github-hooks: true - status-context: elasticsearch-ci/cloud-deploy - cancel-builds-on-update: true - black-list-target-branches: - - 6.8 - excluded-regions: - - ^docs/.* - white-list-labels: - - 'cloud-deploy' - builders: - - inject: - properties-file: '.ci/java-versions.properties' - properties-content: | - JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA - RUNTIME_JAVA_HOME=$HOME/.java/$ES_RUNTIME_JAVA - - shell: | - #!/usr/local/bin/runbld --redirect-stderr - $WORKSPACE/.ci/scripts/run-gradle.sh buildCloudDockerImage - - shell: | - #!/bin/bash - set +x - VAULT_TOKEN=$(vault write -field=token auth/approle/login role_id=$VAULT_ROLE_ID secret_id=$VAULT_SECRET_ID) - export VAULT_TOKEN - export DOCKER_REGISTRY_USERNAME="$(vault read -field=username secret/elasticsearch-ci/prod_docker_registry_credentials)" - export 
DOCKER_REGISTRY_PASSWORD="$(vault read -field=password secret/elasticsearch-ci/prod_docker_registry_credentials)" - export ES_VERSION=$(grep 'elasticsearch' build-tools-internal/version.properties | awk '{print $3}') - export DOCKER_TAG=docker.elastic.co/elasticsearch-ci/elasticsearch-cloud:${ES_VERSION}-${ghprbActualCommit:0:7} - docker tag elasticsearch-cloud:test $DOCKER_TAG - echo $DOCKER_REGISTRY_PASSWORD | docker login -u $DOCKER_REGISTRY_USERNAME --password-stdin docker.elastic.co - unset VAULT_TOKEN DOCKER_REGISTRY_USERNAME DOCKER_REGISTRY_PASSWORD - set -x - docker push $DOCKER_TAG diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+docs-check.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+docs-check.yml deleted file mode 100644 index 26d17e60959d6..0000000000000 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+docs-check.yml +++ /dev/null @@ -1,35 +0,0 @@ ---- -- job: - name: "elastic+elasticsearch+pull-request+docs-check" - display-name: "elastic / elasticsearch - pull request docs-check" - description: "Testing of Elasticsearch pull requests - docs-check" - workspace: "/dev/shm/elastic+elasticsearch+pull-request+docs-check" - scm: - - git: - refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" - branches: - - "${ghprbActualCommit}" - triggers: - - github-pull-request: - org-list: - - elastic - allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/docs-check.*' - github-hooks: true - status-context: elasticsearch-ci/docs-check - cancel-builds-on-update: true - included-regions: - - ^docs/.* - black-list-labels: - - '>test-mute' - builders: - - inject: - properties-file: '.ci/java-versions.properties' - properties-content: | - JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA - JAVA8_HOME=$HOME/.java/java8 - JAVA11_HOME=$HOME/.java/java11 - RUNTIME_JAVA_HOME=$HOME/.java/$ES_RUNTIME_JAVA - - shell: | - #!/usr/local/bin/runbld --redirect-stderr - $WORKSPACE/.ci/scripts/run-gradle.sh -Dignore.tests.seed precommit :docs:check diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+eql-correctness.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+eql-correctness.yml deleted file mode 100644 index be749c200557b..0000000000000 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+eql-correctness.yml +++ /dev/null @@ -1,43 +0,0 @@ ---- -- job: - name: "elastic+elasticsearch+pull-request+eql-correctness" - display-name: "elastic / elasticsearch - pull request eql-correctness" - description: "Testing of Elasticsearch pull requests - eql-correctness" - workspace: "/dev/shm/elastic+elasticsearch+pull-request+eql-correctness" - scm: - - git: - refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" - branches: - - "${ghprbActualCommit}" - triggers: - - github-pull-request: - org-list: - - elastic - allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/eql-correctness.*' - github-hooks: true - status-context: elasticsearch-ci/eql-correctness - cancel-builds-on-update: true - black-list-target-branches: - - 6.8 - excluded-regions: - - ^docs/.* - black-list-labels: - - '>test-mute' - builders: - - inject: - properties-file: '.ci/java-versions.properties' - properties-content: | - JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA - RUNTIME_JAVA_HOME=$HOME/.java/$ES_RUNTIME_JAVA - - shell: | - #!/usr/local/bin/runbld --redirect-stderr - set +x - VAULT_TOKEN=$(vault write -field=token auth/approle/login role_id=$VAULT_ROLE_ID secret_id=$VAULT_SECRET_ID) - export VAULT_TOKEN - export 
eql_test_credentials_file="$(pwd)/x-pack/plugin/eql/qa/correctness/credentials.gcs.json" - vault read -field=credentials.gcs.json secret/elasticsearch-ci/eql_test_credentials > ${eql_test_credentials_file} - unset VAULT_TOKEN - set -x - - $WORKSPACE/.ci/scripts/run-gradle.sh -Dignore.tests.seed :x-pack:plugin:eql:qa:correctness:check diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+example-plugins.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+example-plugins.yml deleted file mode 100644 index f9be84bd5f6c7..0000000000000 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+example-plugins.yml +++ /dev/null @@ -1,37 +0,0 @@ ---- -- job: - name: "elastic+elasticsearch+pull-request+example-plugins" - display-name: "elastic / elasticsearch - pull request example-plugins" - description: "Testing of Elasticsearch pull requests - example-plugins" - workspace: "/dev/shm/elastic+elasticsearch+pull-request+example-plugins" - scm: - - git: - refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" - branches: - - "${ghprbActualCommit}" - triggers: - - github-pull-request: - org-list: - - elastic - allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/example-plugins.*' - github-hooks: true - status-context: elasticsearch-ci/example-plugins - cancel-builds-on-update: true - included-regions: - - build-conventions/.* - - build-tools/.* - - build-tools-internal/.* - - plugins/examples/.* - builders: - - inject: - properties-file: '.ci/java-versions.properties' - properties-content: | - JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA - JAVA8_HOME=$HOME/.java/java8 - JAVA11_HOME=$HOME/.java/java11 - RUNTIME_JAVA_HOME=$HOME/.java/$ES_RUNTIME_JAVA - - shell: | - #!/usr/local/bin/runbld --redirect-stderr - cd plugins/examples - $WORKSPACE/.ci/scripts/run-gradle.sh -Dorg.gradle.jvmargs=-Xmx8g build --include-build $WORKSPACE diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+full-bwc.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+full-bwc.yml deleted file mode 100644 index 2c7d653c5f971..0000000000000 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+full-bwc.yml +++ /dev/null @@ -1,50 +0,0 @@ ---- -- job: - name: "elastic+elasticsearch+pull-request+full-bwc" - display-name: "elastic / elasticsearch - pull request full-bwc" - description: "Testing of Elasticsearch pull requests - full-bwc" - project-type: matrix - node: master - child-workspace: "/dev/shm/elastic+elasticsearch+pull-request+full-bwc" - scm: - - git: - refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" - branches: - - "${ghprbActualCommit}" - triggers: - - github-pull-request: - org-list: - - elastic - allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/full-bwc.*' - github-hooks: true - status-context: elasticsearch-ci/full-bwc - cancel-builds-on-update: true - excluded-regions: - - ^docs/.* - white-list-labels: - - 'test-full-bwc' - black-list-labels: - - '>test-mute' - axes: - - axis: - type: slave - name: nodes - values: - - "general-purpose" - - axis: - type: yaml - filename: ".ci/bwcVersions" - name: "BWC_VERSION" - builders: - - inject: - properties-file: '.ci/java-versions.properties' - properties-content: | - JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA - RUNTIME_JAVA_HOME=$HOME/.java/$ES_RUNTIME_JAVA - JAVA8_HOME=$HOME/.java/java8 - JAVA11_HOME=$HOME/.java/java11 - JAVA16_HOME=$HOME/.java/openjdk16 - - shell: | - #!/usr/local/bin/runbld --redirect-stderr - $WORKSPACE/.ci/scripts/run-gradle.sh -Dignore.tests.seed 
v$BWC_VERSION#bwcTest diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix-sample.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix-sample.yml deleted file mode 100644 index 1942bc53ded11..0000000000000 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix-sample.yml +++ /dev/null @@ -1,51 +0,0 @@ ---- -- job: - name: "elastic+elasticsearch+pull-request+packaging-tests-unix-sample" - display-name: "elastic / elasticsearch - pull request packaging-tests-unix-sample" - description: "Testing of Elasticsearch pull requests - packaging-tests-unix-sample" - project-type: matrix - node: master - scm: - - git: - refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" - branches: - - "${ghprbActualCommit}" - triggers: - - github-pull-request: - org-list: - - elastic - allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/packaging-tests-unix-sample.*' - github-hooks: true - status-context: elasticsearch-ci/packaging-tests-unix-sample - cancel-builds-on-update: true - black-list-target-branches: - - 6.8 - excluded-regions: - - ^docs/.* - black-list-labels: - - '>test-mute' - - ':Delivery/Packaging' - axes: - - axis: - type: label-expression - name: os - values: - - rocky-linux-8-packaging - - ubuntu-20.04-packaging - - axis: - type: user-defined - name: PACKAGING_TASK - values: - - 'destructiveDistroTest.docker' - - 'destructiveDistroTest.packages' - - 'destructiveDistroTest.archives' - builders: - - inject: - properties-file: '.ci/java-versions.properties' - properties-content: | - JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA - RUNTIME_JAVA_HOME=$HOME/.java/$ES_RUNTIME_JAVA - - shell: | - #!/usr/local/bin/runbld --redirect-stderr - ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ $PACKAGING_TASK diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix.yml deleted file mode 100644 index 79f069c5449cc..0000000000000 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix.yml +++ /dev/null @@ -1,66 +0,0 @@ ---- -- job: - name: "elastic+elasticsearch+pull-request+packaging-tests-unix" - display-name: "elastic / elasticsearch - pull request packaging-tests-unix" - description: "Testing of Elasticsearch pull requests - packaging-tests-unix" - project-type: matrix - node: master - scm: - - git: - refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" - branches: - - "${ghprbActualCommit}" - triggers: - - github-pull-request: - org-list: - - elastic - allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/packaging-tests-unix.*' - github-hooks: true - status-context: elasticsearch-ci/packaging-tests-unix - cancel-builds-on-update: true - black-list-target-branches: - - 6.8 - excluded-regions: - - ^docs/.* - white-list-labels: - - ':Delivery/Packaging' - black-list-labels: - - '>test-mute' - axes: - - axis: - type: label-expression - name: os - values: - - centos-7-packaging - - debian-10-packaging - - debian-11-packaging - - opensuse-15-1-packaging - - oraclelinux-7-packaging - - oraclelinux-8-packaging - - sles-12-packaging - - sles-15-packaging - - ubuntu-18.04-packaging - - ubuntu-20.04-packaging - - ubuntu-22.04-packaging - - rocky-linux-8-packaging - - rhel-7-packaging - - rhel-8-packaging - - rhel-9-packaging - - almalinux-8-packaging - - axis: - type: 
user-defined - name: PACKAGING_TASK - values: - - 'destructiveDistroTest.docker' - - 'destructiveDistroTest.packages' - - 'destructiveDistroTest.archives' - builders: - - inject: - properties-file: '.ci/java-versions.properties' - properties-content: | - JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA - RUNTIME_JAVA_HOME=$HOME/.java/$ES_RUNTIME_JAVA - - shell: | - #!/usr/local/bin/runbld --redirect-stderr - ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ $PACKAGING_TASK diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-nojdk.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-nojdk.yml deleted file mode 100644 index 8a09bcb7ec473..0000000000000 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-nojdk.yml +++ /dev/null @@ -1,61 +0,0 @@ ---- -- job: - name: "elastic+elasticsearch+pull-request+packaging-tests-windows-nojdk" - display-name: "elastic / elasticsearch - pull request packaging-tests-windows-nojdk" - description: "Testing of Elasticsearch pull requests - packaging-tests-windows-nojdk" - # We use a hard-coded workspace directory here to avoid hitting windows path length limits - child-workspace: "C:\\Users\\jenkins\\workspace\\pr-packaging-windows\\${BUILD_NUMBER}" - project-type: matrix - node: master - scm: - - git: - refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" - branches: - - "${ghprbActualCommit}" - triggers: - - github-pull-request: - org-list: - - elastic - allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/packaging-tests-windows.*' - github-hooks: true - status-context: elasticsearch-ci/packaging-tests-windows - cancel-builds-on-update: true - # We've removed the no-jdk distribution on main as well - white-list-target-branches: - - 7.17 - - 7.16 - excluded-regions: - - ^docs/.* - white-list-labels: - - ':Delivery/Packaging' - black-list-labels: - - '>test-mute' - axes: - - axis: - type: label-expression - name: os - values: - - "windows-2012-r2" - - "windows-2016" - - "windows-2019" - - "windows-2022" - - axis: - type: user-defined - name: PACKAGING_TASK - values: - - 'default-windows-archive' - - 'default-windows-archive-no-jdk' - builders: - - inject: - properties-file: '.ci/java-versions.properties' - properties-content: | - JAVA_HOME=$USERPROFILE\\.java\\$ES_BUILD_JAVA - RUNTIME_JAVA_HOME=$USERPROFILE\\.java\\$ES_RUNTIME_JAVA - - batch: | - del /f /s /q %USERPROFILE%\.gradle\init.d\*.* - mkdir %USERPROFILE%\.gradle\init.d - copy .ci\init.gradle %USERPROFILE%\.gradle\init.d\ - ( - echo powershell.exe .\.ci\scripts\packaging-test.ps1 -GradleTasks destructiveDistroTest.%PACKAGING_TASK% ^|^| exit /b 1 - ) | java -jar "C:\Program Files\infra\bin\runbld" --redirect-stderr - diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample-nojdk.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample-nojdk.yml deleted file mode 100644 index d3874ac433b18..0000000000000 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample-nojdk.yml +++ /dev/null @@ -1,57 +0,0 @@ ---- -- job: - name: "elastic+elasticsearch+pull-request+packaging-tests-windows-sample-nojdk" - display-name: "elastic / elasticsearch - pull request packaging-tests-windows-sample-nojdk" - description: "Testing of Elasticsearch pull requests - packaging-tests-windows-sample-nojdk" - # We use a hard-coded workspace directory here 
to avoid hitting windows path length limits - child-workspace: "C:\\Users\\jenkins\\workspace\\pr-packaging-windows\\${BUILD_NUMBER}" - project-type: matrix - node: master - scm: - - git: - refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" - branches: - - "${ghprbActualCommit}" - triggers: - - github-pull-request: - org-list: - - elastic - allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/packaging-tests-windows-sample.*' - github-hooks: true - status-context: elasticsearch-ci/packaging-tests-windows-sample - cancel-builds-on-update: true - # We've removed the no-jdk distribution on main as well - white-list-target-branches: - - 7.17 - - 7.16 - excluded-regions: - - ^docs/.* - black-list-labels: - - '>test-mute' - - ':Delivery/Packaging' - axes: - - axis: - type: label-expression - name: os - values: - - "windows-2019" - - axis: - type: user-defined - name: PACKAGING_TASK - values: - - 'default-windows-archive' - - 'default-windows-archive-no-jdk' - builders: - - inject: - properties-file: '.ci/java-versions.properties' - properties-content: | - JAVA_HOME=$USERPROFILE\\.java\\$ES_BUILD_JAVA - RUNTIME_JAVA_HOME=$USERPROFILE\\.java\\$ES_RUNTIME_JAVA - - batch: | - del /f /s /q %USERPROFILE%\.gradle\init.d\*.* - mkdir %USERPROFILE%\.gradle\init.d - copy .ci\init.gradle %USERPROFILE%\.gradle\init.d\ - ( - echo powershell.exe .\.ci\scripts\packaging-test.ps1 -GradleTasks destructiveDistroTest.%PACKAGING_TASK% ^|^| exit /b 1 - ) | java -jar "C:\Program Files\infra\bin\runbld" --redirect-stderr - diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample.yml deleted file mode 100644 index b98716656ce8e..0000000000000 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample.yml +++ /dev/null @@ -1,55 +0,0 @@ ---- -- job: - name: "elastic+elasticsearch+pull-request+packaging-tests-windows-sample" - display-name: "elastic / elasticsearch - pull request packaging-tests-windows-sample" - description: "Testing of Elasticsearch pull requests - packaging-tests-windows-sample" - # We use a hard-coded workspace directory here to avoid hitting windows path length limits - child-workspace: "C:\\Users\\jenkins\\workspace\\pr-packaging-windows\\${BUILD_NUMBER}" - project-type: matrix - node: master - scm: - - git: - refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" - branches: - - "${ghprbActualCommit}" - triggers: - - github-pull-request: - org-list: - - elastic - allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/packaging-tests-windows-sample.*' - github-hooks: true - status-context: elasticsearch-ci/packaging-tests-windows-sample - cancel-builds-on-update: true - black-list-target-branches: - - 7.17 - - 7.16 - excluded-regions: - - ^docs/.* - black-list-labels: - - '>test-mute' - - ':Delivery/Packaging' - axes: - - axis: - type: label-expression - name: os - values: - - "windows-2019" - - axis: - type: user-defined - name: PACKAGING_TASK - values: - - 'default-windows-archive' - builders: - - inject: - properties-file: '.ci/java-versions.properties' - properties-content: | - JAVA_HOME=$USERPROFILE\\.java\\$ES_BUILD_JAVA - RUNTIME_JAVA_HOME=$USERPROFILE\\.java\\$ES_RUNTIME_JAVA - - batch: | - del /f /s /q %USERPROFILE%\.gradle\init.d\*.* - mkdir %USERPROFILE%\.gradle\init.d - copy .ci\init.gradle %USERPROFILE%\.gradle\init.d\ - ( - echo powershell.exe 
.\.ci\scripts\packaging-test.ps1 -GradleTasks destructiveDistroTest.%PACKAGING_TASK% ^|^| exit /b 1 - ) | java -jar "C:\Program Files\infra\bin\runbld" --redirect-stderr - diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows.yml deleted file mode 100644 index 6a7e8e28ab2a7..0000000000000 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows.yml +++ /dev/null @@ -1,60 +0,0 @@ ---- -- job: - name: "elastic+elasticsearch+pull-request+packaging-tests-windows" - display-name: "elastic / elasticsearch - pull request packaging-tests-windows" - description: "Testing of Elasticsearch pull requests - packaging-tests-windows" - # We use a hard-coded workspace directory here to avoid hitting windows path length limits - child-workspace: "C:\\Users\\jenkins\\workspace\\pr-packaging-windows\\${BUILD_NUMBER}" - project-type: matrix - node: master - scm: - - git: - refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" - branches: - - "${ghprbActualCommit}" - triggers: - - github-pull-request: - org-list: - - elastic - allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/packaging-tests-windows.*' - github-hooks: true - status-context: elasticsearch-ci/packaging-tests-windows - cancel-builds-on-update: true - black-list-target-branches: - - 7.17 - - 7.16 - - 6.8 - excluded-regions: - - ^docs/.* - white-list-labels: - - ':Delivery/Packaging' - black-list-labels: - - '>test-mute' - axes: - - axis: - type: label-expression - name: os - values: - - "windows-2012-r2" - - "windows-2016" - - "windows-2019" - - "windows-2022" - - axis: - type: user-defined - name: PACKAGING_TASK - values: - - 'default-windows-archive' - builders: - - inject: - properties-file: '.ci/java-versions.properties' - properties-content: | - JAVA_HOME=$USERPROFILE\\.java\\$ES_BUILD_JAVA - RUNTIME_JAVA_HOME=$USERPROFILE\\.java\\$ES_RUNTIME_JAVA - - batch: | - del /f /s /q %USERPROFILE%\.gradle\init.d\*.* - mkdir %USERPROFILE%\.gradle\init.d - copy .ci\init.gradle %USERPROFILE%\.gradle\init.d\ - ( - echo powershell.exe .\.ci\scripts\packaging-test.ps1 -GradleTasks destructiveDistroTest.%PACKAGING_TASK% ^|^| exit /b 1 - ) | java -jar "C:\Program Files\infra\bin\runbld" --redirect-stderr - diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-upgrade-tests.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-upgrade-tests.yml deleted file mode 100644 index 97f7b1faee25f..0000000000000 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-upgrade-tests.yml +++ /dev/null @@ -1,53 +0,0 @@ ---- -- job: - name: "elastic+elasticsearch+pull-request+packaging-upgrade-tests" - display-name: "elastic / elasticsearch - pull request packaging-upgrade-tests" - description: "Testing of Elasticsearch pull requests - packaging-upgrade-tests" - project-type: matrix - node: master - child-workspace: "/dev/shm/elastic+elasticsearch+pull-request+packaging-upgrade-tests" - scm: - - git: - refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" - branches: - - "${ghprbActualCommit}" - triggers: - - github-pull-request: - org-list: - - elastic - allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/packaging-upgrade-tests.*' - github-hooks: true - status-context: elasticsearch-ci/packaging-upgrade-tests - cancel-builds-on-update: true - black-list-target-branches: - - 6.8 - excluded-regions: - - ^docs/.* - 
white-list-labels: - - ':Delivery/Packaging' - black-list-labels: - - '>test-mute' - axes: - - axis: - type: label-expression - name: os - values: - - rocky-linux-8-packaging - - ubuntu-20.04-packaging - - axis: - type: yaml - filename: ".ci/bwcVersions" - name: "BWC_VERSION" - builders: - - inject: - properties-file: '.ci/java-versions.properties' - properties-content: | - JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA - RUNTIME_JAVA_HOME=$HOME/.java/$ES_RUNTIME_JAVA - JAVA8_HOME=$HOME/.java/java8 - JAVA11_HOME=$HOME/.java/java11 - JAVA16_HOME=$HOME/.java/openjdk16 - - shell: | - #!/usr/local/bin/runbld --redirect-stderr - ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ destructiveDistroUpgradeTest.v$BWC_VERSION diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-fips.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-fips.yml deleted file mode 100644 index 56d6e1d45220d..0000000000000 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-fips.yml +++ /dev/null @@ -1,39 +0,0 @@ ---- -- job: - name: "elastic+elasticsearch+pull-request+part-1-fips" - display-name: "elastic / elasticsearch - pull request part-1 fips" - description: "Testing of Elasticsearch pull requests - part-1 fips" - workspace: "/dev/shm/elastic+elasticsearch+pull-request+part-1-fips" - scm: - - git: - refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" - branches: - - "${ghprbActualCommit}" - triggers: - - github-pull-request: - org-list: - - elastic - allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/part-1-fips.*' - github-hooks: true - status-context: elasticsearch-ci/part-1-fips - cancel-builds-on-update: true - black-list-target-branches: - - 6.8 - excluded-regions: - - ^docs/.* - white-list-labels: - - 'Team:Security' - black-list-labels: - - '>test-mute' - builders: - - inject: - # Use FIPS-specific Java versions - properties-file: '.ci/java-versions-fips.properties' - properties-content: | - JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA - RUNTIME_JAVA_HOME=$HOME/.java/$ES_RUNTIME_JAVA - JAVA16_HOME=$HOME/.java/openjdk16 - - shell: | - #!/usr/local/bin/runbld --redirect-stderr - $WORKSPACE/.ci/scripts/run-gradle.sh -Dignore.tests.seed -Dtests.fips.enabled=true checkPart1 diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-windows.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-windows.yml deleted file mode 100644 index dbb54e37fb23f..0000000000000 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-windows.yml +++ /dev/null @@ -1,44 +0,0 @@ ---- -- job: - name: "elastic+elasticsearch+pull-request+part-1-windows" - display-name: "elastic / elasticsearch - pull request part-1 windows" - description: "Testing of Elasticsearch pull requests - part-1 windows" - node: "windows-immutable" - workspace: "C:\\Users\\jenkins\\workspace\\pr-part-1\\${BUILD_NUMBER}" - scm: - - git: - refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" - branches: - - "${ghprbActualCommit}" - triggers: - - github-pull-request: - org-list: - - elastic - allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/part-1-windows.*' - github-hooks: true - status-context: elasticsearch-ci/part-1-windows - cancel-builds-on-update: true - black-list-target-branches: - - 6.8 - excluded-regions: - - ^docs/.* - white-list-labels: - - 'test-windows' - black-list-labels: - - '>test-mute' - builders: - - inject: - properties-file: 
'.ci/java-versions.properties' - properties-content: | - JAVA_HOME=$USERPROFILE\\.java\\$ES_BUILD_JAVA - RUNTIME_JAVA_HOME=$USERPROFILE\\.java\\$ES_RUNTIME_JAVA - JAVA16_HOME=$USERPROFILE\\.java\\openjdk16 - GRADLE_TASK=checkPart1 - - batch: | - del /f /s /q %USERPROFILE%\.gradle\init.d\*.* - mkdir %USERPROFILE%\.gradle\init.d - copy .ci\init.gradle %USERPROFILE%\.gradle\init.d\ - ( - echo call %GRADLEW_BAT% --max-workers=4 -Dbwc.checkout.align=true %GRADLE_TASK% ^|^| exit /b 1 - ) | java -jar "C:\Program Files\infra\bin\runbld" --redirect-stderr - diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1.yml deleted file mode 100644 index 8d4f4fbe31678..0000000000000 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -jjbb-template: pull-request-gradle-unix.yml -vars: - - pr-job: "part-1" - - gradle-args: "-Dignore.tests.seed checkPart1" diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-fips.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-fips.yml deleted file mode 100644 index 962ccea646aab..0000000000000 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-fips.yml +++ /dev/null @@ -1,39 +0,0 @@ ---- -- job: - name: "elastic+elasticsearch+pull-request+part-2-fips" - display-name: "elastic / elasticsearch - pull request part-2 fips" - description: "Testing of Elasticsearch pull requests - part-2 fips" - workspace: "/dev/shm/elastic+elasticsearch+pull-request+part-2-fips" - scm: - - git: - refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" - branches: - - "${ghprbActualCommit}" - triggers: - - github-pull-request: - org-list: - - elastic - allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/part-2-fips.*' - github-hooks: true - status-context: elasticsearch-ci/part-2-fips - cancel-builds-on-update: true - black-list-target-branches: - - 6.8 - excluded-regions: - - ^docs/.* - white-list-labels: - - 'Team:Security' - black-list-labels: - - '>test-mute' - builders: - - inject: - # Use FIPS-specific Java versions - properties-file: '.ci/java-versions-fips.properties' - properties-content: | - JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA - RUNTIME_JAVA_HOME=$HOME/.java/$ES_RUNTIME_JAVA - JAVA16_HOME=$HOME/.java/openjdk16 - - shell: | - #!/usr/local/bin/runbld --redirect-stderr - $WORKSPACE/.ci/scripts/run-gradle.sh -Dignore.tests.seed -Dtests.fips.enabled=true checkPart2 diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-windows.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-windows.yml deleted file mode 100644 index ab367188cb8e9..0000000000000 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-windows.yml +++ /dev/null @@ -1,44 +0,0 @@ ---- -- job: - name: "elastic+elasticsearch+pull-request+part-2-windows" - display-name: "elastic / elasticsearch - pull request part-2 windows" - description: "Testing of Elasticsearch pull requests - part-2 windows" - node: "windows-immutable" - workspace: "C:\\Users\\jenkins\\workspace\\pr-part-2\\${BUILD_NUMBER}" - scm: - - git: - refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" - branches: - - "${ghprbActualCommit}" - triggers: - - github-pull-request: - org-list: - - elastic - allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/part-2-windows.*' - github-hooks: true - status-context: elasticsearch-ci/part-2-windows - cancel-builds-on-update: true - 
black-list-target-branches: - - 6.8 - excluded-regions: - - ^docs/.* - white-list-labels: - - 'test-windows' - black-list-labels: - - '>test-mute' - builders: - - inject: - properties-file: '.ci/java-versions.properties' - properties-content: | - JAVA_HOME=$USERPROFILE\\.java\\$ES_BUILD_JAVA - RUNTIME_JAVA_HOME=$USERPROFILE\\.java\\$ES_RUNTIME_JAVA - JAVA16_HOME=$USERPROFILE\\.java\\openjdk16 - GRADLE_TASK=checkPart2 - - batch: | - del /f /s /q %USERPROFILE%\.gradle\init.d\*.* - mkdir %USERPROFILE%\.gradle\init.d - copy .ci\init.gradle %USERPROFILE%\.gradle\init.d\ - ( - echo call %GRADLEW_BAT% --max-workers=4 -Dbwc.checkout.align=true %GRADLE_TASK% ^|^| exit /b 1 - ) | java -jar "C:\Program Files\infra\bin\runbld" --redirect-stderr - diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2.yml deleted file mode 100644 index b77edcd3759be..0000000000000 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -jjbb-template: pull-request-gradle-unix.yml -vars: - - pr-job: "part-2" - - gradle-args: "-Dignore.tests.seed checkPart2" diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3.yml deleted file mode 100644 index e0a3e9cb8fd71..0000000000000 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3.yml +++ /dev/null @@ -1,33 +0,0 @@ ---- -- job: - name: "elastic+elasticsearch+pull-request+part-3" - display-name: "elastic / elasticsearch - pull request part-3" - description: "Testing of Elasticsearch pull requests - part 3" - workspace: "/dev/shm/elastic+elasticsearch+pull-request+part-3" - scm: - - git: - refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" - branches: - - "${ghprbActualCommit}" - triggers: - - github-pull-request: - org-list: - - elastic - allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/part-3.*' - github-hooks: true - status-context: elasticsearch-ci/part-3 - cancel-builds-on-update: true - white-list-labels: - - 'test-part-3' - builders: - - inject: - properties-file: '.ci/java-versions.properties' - properties-content: | - JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA - RUNTIME_JAVA_HOME=$HOME/.java/$ES_RUNTIME_JAVA - JAVA8_HOME=$HOME/.java/java8 - JAVA11_HOME=$HOME/.java/java11 - - shell: | - #!/usr/local/bin/runbld --redirect-stderr - $WORKSPACE/.ci/scripts/run-gradle.sh -Dignore.tests.seed checkPart3 diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+precommit.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+precommit.yml deleted file mode 100644 index aadb8464cff55..0000000000000 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+precommit.yml +++ /dev/null @@ -1,33 +0,0 @@ ---- -- job: - name: "elastic+elasticsearch+pull-request+precommit" - display-name: "elastic / elasticsearch - pull request precommit" - description: "Testing of Elasticsearch pull requests - precommit" - workspace: "/dev/shm/elastic+elasticsearch+pull-request+precommit" - scm: - - git: - refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" - branches: - - "${ghprbActualCommit}" - triggers: - - github-pull-request: - org-list: - - elastic - allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/precommit.*' - github-hooks: true - status-context: elasticsearch-ci/precommit - cancel-builds-on-update: true - white-list-labels: - - '>test-mute' - builders: - - inject: - properties-file: 
'.ci/java-versions.properties' - properties-content: | - JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA - RUNTIME_JAVA_HOME=$HOME/.java/$ES_RUNTIME_JAVA - JAVA8_HOME=$HOME/.java/java8 - JAVA11_HOME=$HOME/.java/java11 - - shell: | - #!/usr/local/bin/runbld --redirect-stderr - $WORKSPACE/.ci/scripts/run-gradle.sh -Dignore.tests.seed precommit diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+release-tests.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+release-tests.yml deleted file mode 100644 index 1f08d36cfd97c..0000000000000 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+release-tests.yml +++ /dev/null @@ -1,51 +0,0 @@ ---- -- job: - name: "elastic+elasticsearch+pull-request+release-tests" - display-name: "elastic / elasticsearch - pull request release-tests" - description: "Testing of Elasticsearch pull requests - release-tests" - # Don't use ramdisk since this build generates lots of large artifacts and results in oomkiller issues - # workspace: "/dev/shm/elastic+elasticsearch+pull-request+release-tests" - node: "general-purpose && docker" - scm: - - git: - refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" - branches: - - "${ghprbActualCommit}" - triggers: - - github-pull-request: - org-list: - - elastic - allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/release-tests.*' - github-hooks: true - status-context: elasticsearch-ci/release-tests - cancel-builds-on-update: true - excluded-regions: - - ^docs/.* - white-list-labels: - - 'test-release' - black-list-target-branches: - - 7.15 - - 6.8 - builders: - - inject: - properties-file: '.ci/java-versions.properties' - properties-content: | - JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA - RUNTIME_JAVA_HOME=$HOME/.java/$ES_RUNTIME_JAVA - JAVA8_HOME=$HOME/.java/java8 - JAVA11_HOME=$HOME/.java/java11 - JAVA16_HOME=$HOME/.java/openjdk16 - - shell: | - #!/usr/local/bin/runbld --redirect-stderr - - # Fetch beats artifacts - export ES_VERSION=$(grep 'elasticsearch' build-tools-internal/version.properties | awk '{print $3}') - export BEATS_DIR=$(pwd)/distribution/docker/build/artifacts/beats - - mkdir -p ${BEATS_DIR} - curl -o "${BEATS_DIR}/metricbeat-${ES_VERSION}-linux-x86_64.tar.gz" https://snapshots-no-kpi.elastic.co/downloads/beats/metricbeat/metricbeat-${ES_VERSION}-SNAPSHOT-linux-x86_64.tar.gz - curl -o "${BEATS_DIR}/filebeat-${ES_VERSION}-linux-x86_64.tar.gz" https://snapshots-no-kpi.elastic.co/downloads/beats/filebeat/filebeat-${ES_VERSION}-SNAPSHOT-linux-x86_64.tar.gz - - $WORKSPACE/.ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dbuild.snapshot=false \ - -Dtests.jvm.argline=-Dbuild.snapshot=false -Dlicense.key=${WORKSPACE}/x-pack/license-tools/src/test/resources/public.key -Dbuild.id=deadbeef build diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+rest-compatibility.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+rest-compatibility.yml deleted file mode 100644 index f99a3c1bdd32c..0000000000000 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+rest-compatibility.yml +++ /dev/null @@ -1,41 +0,0 @@ ---- -- job: - name: "elastic+elasticsearch+pull-request+rest-compatibility" - display-name: "elastic / elasticsearch - pull request rest-compatibility" - description: "Testing of Elasticsearch pull requests - rest-compatibility" - workspace: "/dev/shm/elastic+elasticsearch+pull-request+rest-compatibility" - scm: - - git: - refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" - branches: - - "${ghprbActualCommit}" - triggers: - - 
github-pull-request: - org-list: - - elastic - allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/rest-compatibility.*' - github-hooks: true - status-context: elasticsearch-ci/rest-compatibility - cancel-builds-on-update: true - black-list-target-branches: - - 7.17 - - 7.16 - - 7.15 - - 6.8 - excluded-regions: - - ^docs/.* - black-list-labels: - - '>test-mute' - builders: - - inject: - properties-file: '.ci/java-versions.properties' - properties-content: | - JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA - RUNTIME_JAVA_HOME=$HOME/.java/$ES_RUNTIME_JAVA - JAVA8_HOME=$HOME/.java/java8 - JAVA11_HOME=$HOME/.java/java11 - JAVA16_HOME=$HOME/.java/openjdk16 - - shell: | - #!/usr/local/bin/runbld --redirect-stderr - $WORKSPACE/.ci/scripts/run-gradle.sh -Dignore.tests.seed checkRestCompat diff --git a/.ci/scripts/packaging-test.sh b/.ci/scripts/packaging-test.sh index 7ef82371f6ad7..7b0e8f3320bed 100755 --- a/.ci/scripts/packaging-test.sh +++ b/.ci/scripts/packaging-test.sh @@ -43,6 +43,13 @@ if [ -f "/etc/os-release" ] ; then sudo apt-get install -y --allow-downgrades lintian=2.15.0 fi fi + if [[ "$ID" == "rhel" ]] ; then + # Downgrade containerd if necessary to work around runc bug + # See: https://github.com/opencontainers/runc/issues/3551 + if containerd -version | grep -sF 1.6.7; then + sudo yum downgrade -y containerd.io + fi + fi else cat /etc/issue || true fi diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index 6382a1b6f8b7b..b7d84e5a6bcdd 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,4 +1,3 @@ BWC_VERSION: - - "7.17.6" - - "8.3.3" - - "8.4.0" + - "7.17.7" + - "8.4.3" diff --git a/README.asciidoc b/README.asciidoc index 1391b1903f7f8..2dab5d7fb650f 100644 --- a/README.asciidoc +++ b/README.asciidoc @@ -26,9 +26,156 @@ If you prefer to install and manage Elasticsearch yourself, you can download the latest version from https://www.elastic.co/downloads/elasticsearch[elastic.co/downloads/elasticsearch]. -For more installation options, see the -https://www.elastic.co/guide/en/elasticsearch/reference/current/install-elasticsearch.html[Elasticsearch installation -documentation]. +=== Run Elasticsearch locally + +//// +IMPORTANT: This content is replicated in the Elasticsearch guide. +If you make changes, you must also update setup/set-up-local-dev-deployment.asciidoc. +//// + +To try out Elasticsearch on your own machine, we recommend using Docker +and running both Elasticsearch and Kibana. +Docker images are available from the https://www.docker.elastic.co[Elastic Docker registry]. + +NOTE: Starting in Elasticsearch 8.0, security is enabled by default. +The first time you start Elasticsearch, TLS encryption is configured automatically, +a password is generated for the `elastic` user, +and a Kibana enrollment token is created so you can connect Kibana to your secured cluster. + +For other installation options, see the +https://www.elastic.co/guide/en/elasticsearch/reference/current/install-elasticsearch.html[Elasticsearch installation documentation]. + +**Start Elasticsearch** + +. Install and start https://www.docker.com/products/docker-desktop[Docker +Desktop]. Go to **Preferences > Resources > Advanced** and set Memory to at least 4GB. + +. 
Start an Elasticsearch container: ++ +---- +docker network create elastic +docker pull docker.elastic.co/elasticsearch/elasticsearch:{version} <1> +docker run --name elasticsearch --net elastic -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" -t docker.elastic.co/elasticsearch/elasticsearch:{version} +---- +<1> Replace {version} with the version of Elasticsearch you want to run. ++ +When you start Elasticsearch for the first time, the generated `elastic` user password and +Kibana enrollment token are output to the terminal. ++ +NOTE: You might need to scroll back a bit in the terminal to view the password +and enrollment token. + +. Copy the generated password and enrollment token and save them in a secure +location. These values are shown only when you start Elasticsearch for the first time. +You'll use these to enroll Kibana with your Elasticsearch cluster and log in. + +**Start Kibana** + +Kibana enables you to easily send requests to Elasticsearch and analyze, visualize, and manage data interactively. + +. In a new terminal session, start Kibana and connect it to your Elasticsearch container: ++ +---- +docker pull docker.elastic.co/kibana/kibana:{version} <1> +docker run --name kibana --net elastic -p 5601:5601 docker.elastic.co/kibana/kibana:{version} +---- +<1> Replace {version} with the version of Kibana you want to run. ++ +When you start Kibana, a unique URL is output to your terminal. + +. To access Kibana, open the generated URL in your browser. + + .. Paste the enrollment token that you copied when starting + Elasticsearch and click the button to connect your Kibana instance with Elasticsearch. + + .. Log in to Kibana as the `elastic` user with the password that was generated + when you started Elasticsearch. + +**Send requests to Elasticsearch** + +You send data and other requests to Elasticsearch through REST APIs. +You can interact with Elasticsearch using any client that sends HTTP requests, +such as the https://www.elastic.co/guide/en/elasticsearch/client/index.html[Elasticsearch +language clients] and https://curl.se[curl]. +Kibana's developer console provides an easy way to experiment and test requests. +To access the console, go to **Management > Dev Tools**. + +**Add data** + +You index data into Elasticsearch by sending JSON objects (documents) through the REST APIs. +Whether you have structured or unstructured text, numerical data, or geospatial data, +Elasticsearch efficiently stores and indexes it in a way that supports fast searches. + +For timestamped data such as logs and metrics, you typically add documents to a +data stream made up of multiple auto-generated backing indices. + +To add a single document to an index, submit an HTTP post request that targets the index. + +---- +POST /customer/_doc/1 +{ + "firstname": "Jennifer", + "lastname": "Walters" +} +---- + +This request automatically creates the `customer` index if it doesn't exist, +adds a new document that has an ID of 1, and +stores and indexes the `firstname` and `lastname` fields. + +The new document is available immediately from any node in the cluster. +You can retrieve it with a GET request that specifies its document ID: + +---- +GET /customer/_doc/1 +---- + +To add multiple documents in one request, use the `_bulk` API. +Bulk data must be newline-delimited JSON (NDJSON). +Each line must end in a newline character (`\n`), including the last line. 
+ +---- +PUT customer/_bulk +{ "create": { } } +{ "firstname": "Monica","lastname":"Rambeau"} +{ "create": { } } +{ "firstname": "Carol","lastname":"Danvers"} +{ "create": { } } +{ "firstname": "Wanda","lastname":"Maximoff"} +{ "create": { } } +{ "firstname": "Jennifer","lastname":"Takeda"} +---- + +**Search** + +Indexed documents are available for search in near real-time. +The following search matches all customers with a first name of _Jennifer_ +in the `customer` index. + +---- +GET customer/_search +{ + "query" : { + "match" : { "firstname": "Jennifer" } + } +} +---- + +**Explore** + +You can use Discover in Kibana to interactively search and filter your data. +From there, you can start creating visualizations and building and sharing dashboards. + +To get started, create a _data view_ that connects to one or more Elasticsearch indices, +data streams, or index aliases. + +. Go to **Management > Stack Management > Kibana > Data Views**. +. Select **Create data view**. +. Enter a name for the data view and a pattern that matches one or more indices, +such as _customer_. +. Select **Save data view to Kibana**. + +To start exploring, go to **Analytics > Discover**. [[upgrade]] == Upgrade diff --git a/build-tools-internal/gradle/wrapper/gradle-wrapper.properties b/build-tools-internal/gradle/wrapper/gradle-wrapper.properties index b871071c412e2..e939ec976751d 100644 --- a/build-tools-internal/gradle/wrapper/gradle-wrapper.properties +++ b/build-tools-internal/gradle/wrapper/gradle-wrapper.properties @@ -1,6 +1,6 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-7.5-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-7.5.1-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=97a52d145762adc241bad7fd18289bf7f6801e08ece6badf80402fe2b9f250b1 +distributionSha256Sum=db9c8211ed63f61f60292c69e80d89196f9eb36665e369e7f00ac4cc841c2219 diff --git a/build-tools-internal/src/main/groovy/elasticsearch.build-complete.gradle b/build-tools-internal/src/main/groovy/elasticsearch.build-complete.gradle index 50db02d9e21a1..156f0b2555447 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.build-complete.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.build-complete.gradle @@ -23,7 +23,6 @@ if (buildNumber && performanceTest == null) { fileset(dir: projectDir) { Set fileSet = fileTree(projectDir) { include("**/*.hprof") - include("**/reaper.log") include("**/build/test-results/**/*.xml") include("**/build/testclusters/**") exclude("**/build/testclusters/**/data/**") @@ -49,6 +48,8 @@ if (buildNumber && performanceTest == null) { } fileset(dir: "${gradle.gradleUserHomeDir}/workers", followsymlinks: false) + + fileset(dir: "${project.projectDir}/.gradle/reaper", followsymlinks: false, erroronmissingdir: false) } } catch (Exception e) { logger.lifecycle("Failed to archive additional logs", e) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BaseInternalPluginBuildPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BaseInternalPluginBuildPlugin.java index 3a35e9dff79d8..7d801be7b1a06 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BaseInternalPluginBuildPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BaseInternalPluginBuildPlugin.java @@ -11,6 +11,7 @@ import groovy.lang.Closure; import 
org.elasticsearch.gradle.internal.conventions.util.Util; +import org.elasticsearch.gradle.internal.info.BuildParams; import org.elasticsearch.gradle.plugin.PluginBuildPlugin; import org.elasticsearch.gradle.plugin.PluginPropertiesExtension; import org.elasticsearch.gradle.testclusters.ElasticsearchCluster; @@ -48,18 +49,21 @@ public void apply(Project project) { .getExtraProperties() .set("addQaCheckDependencies", new Closure(BaseInternalPluginBuildPlugin.this, BaseInternalPluginBuildPlugin.this) { public void doCall(Project proj) { - proj.afterEvaluate(project1 -> { - // let check depend on check tasks of qa sub-projects - final var checkTaskProvider = project1.getTasks().named("check"); - Optional qaSubproject = project1.getSubprojects() - .stream() - .filter(p -> p.getPath().equals(project1.getPath() + ":qa")) - .findFirst(); - qaSubproject.ifPresent( - qa -> qa.getSubprojects() - .forEach(p -> checkTaskProvider.configure(task -> task.dependsOn(p.getPath() + ":check"))) - ); - }); + // This is only a convenience for local developers so make this a noop when running in CI + if (BuildParams.isCi() == false) { + proj.afterEvaluate(project1 -> { + // let check depend on check tasks of qa sub-projects + final var checkTaskProvider = project1.getTasks().named("check"); + Optional qaSubproject = project1.getSubprojects() + .stream() + .filter(p -> p.getPath().equals(project1.getPath() + ":qa")) + .findFirst(); + qaSubproject.ifPresent( + qa -> qa.getSubprojects() + .forEach(p -> checkTaskProvider.configure(task -> task.dependsOn(p.getPath() + ":check"))) + ); + }); + } } public void doCall() { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java index 3856e9826e7f0..f974b02a1c5b3 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java @@ -24,6 +24,7 @@ import org.gradle.language.base.plugins.LifecycleBasePlugin; import java.io.File; +import java.nio.file.Path; import java.util.ArrayList; import java.util.List; import java.util.Locale; @@ -249,9 +250,10 @@ static void createBuildBwcTask( @Override public void execute(Task task) { if (expectedOutputFile.exists() == false) { - throw new InvalidUserDataException( - "Building " + bwcVersion.get() + " didn't generate expected artifact " + expectedOutputFile - ); + Path relativeOutputPath = project.getRootDir().toPath().relativize(expectedOutputFile.toPath()); + final String message = "Building %s didn't generate expected artifact [%s]. 
The working branch may be " + + "out-of-date - try merging in the latest upstream changes to the branch."; + throw new InvalidUserDataException(message.formatted(bwcVersion.get(), relativeOutputPath)); } } }); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerBuildTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerBuildTask.java index c18adf4ab01eb..20f46990815bd 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerBuildTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerBuildTask.java @@ -7,6 +7,7 @@ */ package org.elasticsearch.gradle.internal.docker; +import org.elasticsearch.gradle.Architecture; import org.elasticsearch.gradle.LoggedExec; import org.gradle.api.DefaultTask; import org.gradle.api.GradleException; @@ -54,6 +55,7 @@ public class DockerBuildTask extends DefaultTask { private boolean noCache = true; private String[] baseImages; private MapProperty buildArgs; + private Property platform; @Inject public DockerBuildTask(WorkerExecutor workerExecutor, ObjectFactory objectFactory, ProjectLayout projectLayout) { @@ -61,6 +63,7 @@ public DockerBuildTask(WorkerExecutor workerExecutor, ObjectFactory objectFactor this.markerFile = objectFactory.fileProperty(); this.dockerContext = objectFactory.directoryProperty(); this.buildArgs = objectFactory.mapProperty(String.class, String.class); + this.platform = objectFactory.property(String.class).convention(Architecture.current().dockerPlatform); this.markerFile.set(projectLayout.getBuildDirectory().file("markers/" + this.getName() + ".marker")); } @@ -74,6 +77,7 @@ public void build() { params.getNoCache().set(noCache); params.getBaseImages().set(Arrays.asList(baseImages)); params.getBuildArgs().set(buildArgs); + params.getPlatform().set(platform); }); } @@ -124,8 +128,9 @@ public MapProperty getBuildArgs() { return buildArgs; } - public void setBuildArgs(MapProperty buildArgs) { - this.buildArgs = buildArgs; + @Input + public Property getPlatform() { + return platform; } @OutputFile @@ -176,12 +181,21 @@ public void execute() { } final List tags = parameters.getTags().get(); + final boolean isCrossPlatform = parameters.getPlatform().get().equals(Architecture.current().dockerPlatform) == false; LoggedExec.exec(execOperations, spec -> { spec.executable("docker"); + if (isCrossPlatform) { + spec.args("buildx"); + } + spec.args("build", parameters.getDockerContext().get().getAsFile().getAbsolutePath()); + if (isCrossPlatform) { + spec.args("--platform", parameters.getPlatform().get()); + } + if (parameters.getNoCache().get()) { spec.args("--no-cache"); } @@ -228,5 +242,7 @@ interface Parameters extends WorkParameters { ListProperty getBaseImages(); MapProperty getBuildArgs(); + + Property getPlatform(); } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerSupportService.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerSupportService.java index 4d78a0a7c36d1..0375d598dcdd2 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerSupportService.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerSupportService.java @@ -7,6 +7,7 @@ */ package org.elasticsearch.gradle.internal.docker; +import org.elasticsearch.gradle.Architecture; import org.elasticsearch.gradle.Version; import 
org.elasticsearch.gradle.internal.info.BuildParams; import org.gradle.api.GradleException; @@ -23,26 +24,31 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Optional; +import java.util.Set; import java.util.stream.Collectors; import javax.inject.Inject; +import static java.util.function.Predicate.not; + /** * Build service for detecting available Docker installation and checking for compatibility with Elasticsearch Docker image build * requirements. This includes a minimum version requirement, as well as the ability to run privileged commands. */ public abstract class DockerSupportService implements BuildService { - private static Logger LOGGER = Logging.getLogger(DockerSupportService.class); + private static final Logger LOGGER = Logging.getLogger(DockerSupportService.class); // Defines the possible locations of the Docker CLI. These will be searched in order. - private static String[] DOCKER_BINARIES = { "/usr/bin/docker", "/usr/local/bin/docker" }; - private static String[] DOCKER_COMPOSE_BINARIES = { "/usr/local/bin/docker-compose", "/usr/bin/docker-compose" }; + private static final String[] DOCKER_BINARIES = { "/usr/bin/docker", "/usr/local/bin/docker" }; + private static final String[] DOCKER_COMPOSE_BINARIES = { "/usr/local/bin/docker-compose", "/usr/bin/docker-compose" }; private static final Version MINIMUM_DOCKER_VERSION = Version.fromString("17.05.0"); private final ExecOperations execOperations; @@ -65,6 +71,7 @@ public DockerAvailability getDockerAvailability() { Version version = null; boolean isVersionHighEnough = false; boolean isComposeAvailable = false; + Set supportedArchitectures = new HashSet<>(); // Check if the Docker binary exists final Optional dockerBinary = getDockerPath(); @@ -92,6 +99,25 @@ public DockerAvailability getDockerAvailability() { if (lastResult.isSuccess() && composePath.isPresent()) { isComposeAvailable = runCommand(composePath.get(), "version").isSuccess(); } + + // Now let's check if buildx is available and what supported platforms exist + if (lastResult.isSuccess()) { + Result buildxResult = runCommand(dockerPath, "buildx", "inspect", "--bootstrap"); + if (buildxResult.isSuccess()) { + supportedArchitectures = buildxResult.stdout() + .lines() + .filter(l -> l.startsWith("Platforms:")) + .map(l -> l.substring(10)) + .flatMap(l -> Arrays.stream(l.split(",")).filter(not(String::isBlank))) + .map(String::trim) + .map(s -> Arrays.stream(Architecture.values()).filter(a -> a.dockerPlatform.equals(s)).findAny()) + .filter(Optional::isPresent) + .map(Optional::get) + .collect(Collectors.toSet()); + } else { + supportedArchitectures = Set.of(Architecture.current()); + } + } } } } @@ -104,7 +130,8 @@ public DockerAvailability getDockerAvailability() { isVersionHighEnough, dockerPath, version, - lastResult + lastResult, + supportedArchitectures ); } @@ -334,7 +361,10 @@ public record DockerAvailability( Version version, // Information about the last command executes while probing Docker, or null. 
- Result lastCommand + Result lastCommand, + + // Supported build architectures + Set supportedArchitectures ) {} /** diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/GenerateReleaseNotesTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/GenerateReleaseNotesTask.java index 25034eb36b1a7..4f7799d1074fd 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/GenerateReleaseNotesTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/GenerateReleaseNotesTask.java @@ -128,26 +128,29 @@ public void executeTask() throws IOException { changelogsByVersion.getOrDefault(qualifiedVersion, Set.of()) ); - LOGGER.info("Generating release highlights..."); - ReleaseHighlightsGenerator.update( - this.releaseHighlightsTemplate.get().getAsFile(), - this.releaseHighlightsFile.get().getAsFile(), - entries - ); + // Only update breaking changes and migration guide for new minors + if (qualifiedVersion.revision() == 0) { + LOGGER.info("Generating release highlights..."); + ReleaseHighlightsGenerator.update( + this.releaseHighlightsTemplate.get().getAsFile(), + this.releaseHighlightsFile.get().getAsFile(), + entries + ); - LOGGER.info("Generating breaking changes / deprecations notes..."); - BreakingChangesGenerator.update( - this.breakingChangesTemplate.get().getAsFile(), - this.breakingChangesMigrationFile.get().getAsFile(), - entries - ); + LOGGER.info("Generating breaking changes / deprecations notes..."); + BreakingChangesGenerator.update( + this.breakingChangesTemplate.get().getAsFile(), + this.breakingChangesMigrationFile.get().getAsFile(), + entries + ); - LOGGER.info("Updating migration/index..."); - MigrationIndexGenerator.update( - getMinorVersions(versions), - this.migrationIndexTemplate.get().getAsFile(), - this.migrationIndexFile.get().getAsFile() - ); + LOGGER.info("Updating migration/index..."); + MigrationIndexGenerator.update( + getMinorVersions(versions), + this.migrationIndexTemplate.get().getAsFile(), + this.migrationIndexFile.get().getAsFile() + ); + } } /** diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ValidateChangelogEntryTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ValidateChangelogEntryTask.java index 14114314ad4de..acbd79fe28194 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ValidateChangelogEntryTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ValidateChangelogEntryTask.java @@ -8,6 +8,8 @@ package org.elasticsearch.gradle.internal.release; +import com.google.common.annotations.VisibleForTesting; + import org.gradle.api.DefaultTask; import org.gradle.api.GradleException; import org.gradle.api.file.ConfigurableFileCollection; @@ -30,6 +32,21 @@ public class ValidateChangelogEntryTask extends DefaultTask { private final ConfigurableFileCollection changelogs; private final ProjectLayout projectLayout; + public static final String TRIPLE_BACKTICK = "```"; + private static final String CODE_BLOCK_ERROR = """ + [%s] uses a triple-backtick in the [%s] section, but it must be + formatted as a Asciidoc code block. 
For example: + + [source,yaml] + ---- + { + "metrics.time" : 10, + "metrics.time.min" : 1, + "metrics.time.max" : 500 + } + ---- + """; + @Inject public ValidateChangelogEntryTask(ObjectFactory objectFactory, ProjectLayout projectLayout) { this.changelogs = objectFactory.fileCollection(); @@ -43,37 +60,60 @@ public void executeTask() { .stream() .collect(Collectors.toMap(file -> rootDir.relativize(file.toURI()).toString(), ChangelogEntry::parse)); + changelogs.forEach(ValidateChangelogEntryTask::validate); + } + + @VisibleForTesting + static void validate(String path, ChangelogEntry entry) { // We don't try to find all such errors, because we expect them to be rare e.g. only // when a new file is added. - changelogs.forEach((path, entry) -> { - final String type = entry.getType(); - - if (type.equals("known-issue") == false && type.equals("security") == false) { - if (entry.getPr() == null) { - throw new GradleException( - "[" + path + "] must provide a [pr] number (only 'known-issue' and " + "'security' entries can omit this" - ); - } - - if (entry.getArea() == null) { - throw new GradleException( - "[" + path + "] must provide an [area] (only 'known-issue' and " + "'security' entries can omit this" - ); - } + final String type = entry.getType(); + + if (type.equals("known-issue") == false && type.equals("security") == false) { + if (entry.getPr() == null) { + throw new GradleException( + "[" + path + "] must provide a [pr] number (only 'known-issue' and 'security' entries can omit this" + ); } - if ((type.equals("breaking") || type.equals("breaking-java")) && entry.getBreaking() == null) { + if (entry.getArea() == null) { + throw new GradleException("[" + path + "] must provide an [area] (only 'known-issue' and 'security' entries can omit this"); + } + } + + if (type.equals("breaking") || type.equals("breaking-java")) { + if (entry.getBreaking() == null) { throw new GradleException( "[" + path + "] has type [" + type + "] and must supply a [breaking] section with further information" ); } - if (type.equals("deprecation") && entry.getDeprecation() == null) { + if (entry.getBreaking().getDetails().contains(TRIPLE_BACKTICK)) { + throw new GradleException(CODE_BLOCK_ERROR.formatted(path, "breaking.details")); + } + if (entry.getBreaking().getImpact().contains(TRIPLE_BACKTICK)) { + throw new GradleException(CODE_BLOCK_ERROR.formatted(path, "breaking.impact")); + } + } + + if (type.equals("deprecation")) { + if (entry.getDeprecation() == null) { throw new GradleException( "[" + path + "] has type [deprecation] and must supply a [deprecation] section with further information" ); } - }); + + if (entry.getDeprecation().getDetails().contains(TRIPLE_BACKTICK)) { + throw new GradleException(CODE_BLOCK_ERROR.formatted(path, "deprecation.details")); + } + if (entry.getDeprecation().getImpact().contains(TRIPLE_BACKTICK)) { + throw new GradleException(CODE_BLOCK_ERROR.formatted(path, "deprecation.impact")); + } + } + + if (entry.getHighlight() != null && entry.getHighlight().getBody().contains(TRIPLE_BACKTICK)) { + throw new GradleException(CODE_BLOCK_ERROR.formatted(path, "highlight.body")); + } } @InputFiles diff --git a/build-tools-internal/src/main/resources/checkstyle-idea.xml b/build-tools-internal/src/main/resources/checkstyle-idea.xml index c164c578a73a9..607116560cd80 100644 --- a/build-tools-internal/src/main/resources/checkstyle-idea.xml +++ b/build-tools-internal/src/main/resources/checkstyle-idea.xml @@ -1,19 +1,20 @@ - - - + + 10.3.1 + JavaOnlyWithTests + + + + diff --git 
a/build-tools-internal/src/main/resources/minimumGradleVersion b/build-tools-internal/src/main/resources/minimumGradleVersion index 72906051c5c71..7501d508f743f 100644 --- a/build-tools-internal/src/main/resources/minimumGradleVersion +++ b/build-tools-internal/src/main/resources/minimumGradleVersion @@ -1 +1 @@ -7.5 \ No newline at end of file +7.5.1 \ No newline at end of file diff --git a/build-tools-internal/src/main/resources/templates/breaking-changes.asciidoc b/build-tools-internal/src/main/resources/templates/breaking-changes.asciidoc index 75abce3b2b6ef..a16c7e393ee90 100644 --- a/build-tools-internal/src/main/resources/templates/breaking-changes.asciidoc +++ b/build-tools-internal/src/main/resources/templates/breaking-changes.asciidoc @@ -72,7 +72,7 @@ if (deprecationsByNotabilityByArea.isEmpty() == false) { %> The following functionality has been deprecated in {es} ${majorDotMinor} and will be removed in a future version. While this won't have an immediate impact on your applications, -we strongly encourage you take the described steps to update your code +we strongly encourage you to take the described steps to update your code after upgrading to ${majorDotMinor}. To find out if you are using any deprecated functionality, diff --git a/build-tools-internal/src/main/resources/templates/release-highlights.asciidoc b/build-tools-internal/src/main/resources/templates/release-highlights.asciidoc index f07ba9c5d4db3..bd8ef8602530b 100644 --- a/build-tools-internal/src/main/resources/templates/release-highlights.asciidoc +++ b/build-tools-internal/src/main/resources/templates/release-highlights.asciidoc @@ -32,14 +32,18 @@ if (notableHighlights.isEmpty()) { %> <% for (highlight in notableHighlights) { %> [discrete] [[${ highlight.anchor }]] -=== {es-pull}${highlight.pr}[${highlight.title}] +=== ${highlight.title} ${highlight.body.trim()} + +{es-pull}${highlight.pr}[#${highlight.pr}] <% } %> // end::notable-highlights[] <% } %> <% for (highlight in nonNotableHighlights) { %> [discrete] [[${ highlight.anchor }]] -=== {es-pull}${highlight.pr}[${highlight.title}] +=== ${highlight.title} ${highlight.body.trim()} + +{es-pull}${highlight.pr}[#${highlight.pr}] <% } %> diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/ReleaseHighlightsGeneratorTest.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/ReleaseHighlightsGeneratorTest.java index 7f510bef22661..db39c6eea7e86 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/ReleaseHighlightsGeneratorTest.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/ReleaseHighlightsGeneratorTest.java @@ -60,11 +60,11 @@ public void generateFile_rendersCorrectMarkup() throws Exception { } private List getEntries() { - ChangelogEntry entry1 = makeChangelogEntry(1, true); - ChangelogEntry entry2 = makeChangelogEntry(2, true); - ChangelogEntry entry3 = makeChangelogEntry(3, false); + ChangelogEntry entry123 = makeChangelogEntry(123, true); + ChangelogEntry entry456 = makeChangelogEntry(456, true); + ChangelogEntry entry789 = makeChangelogEntry(789, false); // Return unordered list, to test correct re-ordering - return List.of(entry2, entry1, entry3); + return List.of(entry456, entry123, entry789); } private ChangelogEntry makeChangelogEntry(int pr, boolean notable) { diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/ValidateChangelogEntryTaskTest.java 
b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/ValidateChangelogEntryTaskTest.java new file mode 100644 index 0000000000000..ec7b47b057a97 --- /dev/null +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/ValidateChangelogEntryTaskTest.java @@ -0,0 +1,179 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal.release; + +import org.gradle.api.GradleException; +import org.hamcrest.Matchers; +import org.junit.jupiter.api.Test; + +import java.util.stream.Stream; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.endsWith; + +class ValidateChangelogEntryTaskTest { + + @Test + void test_prNumber_isRequired() { + ChangelogEntry changelog = new ChangelogEntry(); + changelog.setType("enhancement"); + + final String message = doValidate(changelog); + + assertThat(message, endsWith("must provide a [pr] number (only 'known-issue' and 'security' entries can omit this")); + } + + @Test + void test_prNumber_notRequired() { + Stream.of("known-issue", "security").forEach(type -> { + ChangelogEntry changelog = new ChangelogEntry(); + changelog.setType(type); + + // Should not throw an exception! + ValidateChangelogEntryTask.validate("", changelog); + }); + } + + @Test + void test_area_isRequired() { + final ChangelogEntry changelog = new ChangelogEntry(); + changelog.setType("enhancement"); + changelog.setPr(123); + + final String message = doValidate(changelog); + + assertThat(message, endsWith("must provide an [area] (only 'known-issue' and 'security' entries can omit this")); + } + + @Test + void test_breaking_requiresBreakingSection() { + Stream.of("breaking", "breaking-java").forEach(type -> { + final ChangelogEntry changelog = buildChangelog(type); + + final String message = doValidate(changelog); + + assertThat(message, endsWith("has type [" + type + "] and must supply a [breaking] section with further information")); + }); + } + + @Test + void test_breaking_rejectsTripleBackticksInDetails() { + Stream.of("breaking", "breaking-java").forEach(type -> { + final ChangelogEntry.Breaking breaking = new ChangelogEntry.Breaking(); + breaking.setDetails(""" + Some waffle. + ``` + I AM CODE! + ``` + """); + + final ChangelogEntry changelog = buildChangelog(type); + changelog.setBreaking(breaking); + + final String message = doValidate(changelog); + + assertThat(message, containsString("uses a triple-backtick in the [breaking.details] section")); + }); + } + + @Test + void test_breaking_rejectsTripleBackticksInImpact() { + Stream.of("breaking", "breaking-java").forEach(type -> { + final ChangelogEntry.Breaking breaking = new ChangelogEntry.Breaking(); + breaking.setDetails("Waffle waffle"); + breaking.setImpact(""" + More waffle. + ``` + THERE ARE WEASEL RAKING THROUGH MY GARBAGE! 
+ ``` + """); + + final ChangelogEntry changelog = buildChangelog(type); + changelog.setBreaking(breaking); + + final String message = doValidate(changelog); + + assertThat(message, containsString("uses a triple-backtick in the [breaking.impact] section")); + }); + } + + @Test + void test_deprecation_rejectsTripleBackticksInImpact() { + final ChangelogEntry.Deprecation deprecation = new ChangelogEntry.Deprecation(); + deprecation.setDetails("Waffle waffle"); + deprecation.setImpact(""" + More waffle. + ``` + THERE ARE WEASEL RAKING THROUGH MY GARBAGE! + ``` + """); + + final ChangelogEntry changelog = buildChangelog("deprecation"); + changelog.setDeprecation(deprecation); + + final String message = doValidate(changelog); + + assertThat(message, containsString("uses a triple-backtick in the [deprecation.impact] section")); + } + + @Test + void test_deprecation_rejectsTripleBackticksInDetails() { + final ChangelogEntry.Deprecation deprecation = new ChangelogEntry.Deprecation(); + deprecation.setDetails(""" + Some waffle. + ``` + I AM CODE! + ``` + """); + + final ChangelogEntry changelog = buildChangelog("deprecation"); + changelog.setDeprecation(deprecation); + + final String message = doValidate(changelog); + + assertThat(message, containsString("uses a triple-backtick in the [deprecation.details] section")); + } + + @Test + void test_highlight_rejectsTripleBackticksInBody() { + final ChangelogEntry.Highlight highlight = new ChangelogEntry.Highlight(); + highlight.setBody(""" + Some waffle. + ``` + I AM CODE! + ``` + """); + + final ChangelogEntry changelog = buildChangelog("enhancement"); + changelog.setHighlight(highlight); + + final String message = doValidate(changelog); + + assertThat(message, containsString("uses a triple-backtick in the [highlight.body] section")); + } + + private static ChangelogEntry buildChangelog(String type) { + final ChangelogEntry changelog = new ChangelogEntry(); + changelog.setType(type); + changelog.setPr(123); + changelog.setArea("Infra/Core"); + return changelog; + } + + private String doValidate(ChangelogEntry entry) { + try { + ValidateChangelogEntryTask.validate("docs/123.yaml", entry); + throw new AssertionError("No exception thrown!"); + } catch (Exception e) { + assertThat(e, Matchers.instanceOf(GradleException.class)); + return e.getMessage(); + } + } +} diff --git a/build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/BreakingChangesGeneratorTest.generateMigrationFile.asciidoc b/build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/BreakingChangesGeneratorTest.generateMigrationFile.asciidoc index 0de9941327a66..d8be431302601 100644 --- a/build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/BreakingChangesGeneratorTest.generateMigrationFile.asciidoc +++ b/build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/BreakingChangesGeneratorTest.generateMigrationFile.asciidoc @@ -89,7 +89,7 @@ Breaking change impact description 3 The following functionality has been deprecated in {es} 8.4 and will be removed in a future version. While this won't have an immediate impact on your applications, -we strongly encourage you take the described steps to update your code +we strongly encourage you to take the described steps to update your code after upgrading to 8.4. 
To find out if you are using any deprecated functionality, diff --git a/build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/ReleaseHighlightsGeneratorTest.generateFile.asciidoc b/build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/ReleaseHighlightsGeneratorTest.generateFile.asciidoc index a55a590a8bca5..19c713042a42b 100644 --- a/build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/ReleaseHighlightsGeneratorTest.generateFile.asciidoc +++ b/build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/ReleaseHighlightsGeneratorTest.generateFile.asciidoc @@ -20,20 +20,26 @@ Other versions: // tag::notable-highlights[] [discrete] -[[notable_release_highlight_number_1]] -=== {es-pull}1[Notable release highlight number 1] -Notable release body number 1 +[[notable_release_highlight_number_123]] +=== Notable release highlight number 123 +Notable release body number 123 + +{es-pull}123[#123] [discrete] -[[notable_release_highlight_number_2]] -=== {es-pull}2[Notable release highlight number 2] -Notable release body number 2 +[[notable_release_highlight_number_456]] +=== Notable release highlight number 456 +Notable release body number 456 + +{es-pull}456[#456] // end::notable-highlights[] [discrete] -[[notable_release_highlight_number_3]] -=== {es-pull}3[Notable release highlight number 3] -Notable release body number 3 +[[notable_release_highlight_number_789]] +=== Notable release highlight number 789 +Notable release body number 789 + +{es-pull}789[#789] diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties index 8d024789dd2f7..e55d8f51e8efc 100644 --- a/build-tools-internal/version.properties +++ b/build-tools-internal/version.properties @@ -1,8 +1,8 @@ -elasticsearch = 8.4.0 -lucene = 9.3.0-snapshot-b8d1fcfd0ec +elasticsearch = 8.4.3 +lucene = 9.3.0 bundled_jdk_vendor = openjdk -bundled_jdk = 18.0.2+9@f6ad4b4450fd4d298113270ec84f30ee +bundled_jdk = 18.0.2.1+1@db379da656dc47308e138f21b33976fa # optional dependencies spatial4j = 0.7 diff --git a/build-tools/reaper/src/main/java/org/elasticsearch/gradle/reaper/Reaper.java b/build-tools/reaper/src/main/java/org/elasticsearch/gradle/reaper/Reaper.java index f5a24eba36872..e6c5b61e0a76c 100644 --- a/build-tools/reaper/src/main/java/org/elasticsearch/gradle/reaper/Reaper.java +++ b/build-tools/reaper/src/main/java/org/elasticsearch/gradle/reaper/Reaper.java @@ -83,17 +83,17 @@ private void reap() { delete(inputFile); } } - } catch (Exception e) { + } catch (Throwable e) { + failed = true; logFailure("Failed to reap inputs", e); } } - private void logFailure(String message, Exception e) { + private void logFailure(String message, Throwable e) { System.err.println(message); if (e != null) { e.printStackTrace(System.err); } - failed = true; } private void delete(Path toDelete) { diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/Architecture.java b/build-tools/src/main/java/org/elasticsearch/gradle/Architecture.java index 665568e337376..34874b62d9489 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/Architecture.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/Architecture.java @@ -10,13 +10,15 @@ public enum Architecture { - X64("x86_64"), - AARCH64("aarch64"); + X64("x86_64", "linux/amd64"), + AARCH64("aarch64", "linux/arm64"); public final String classifier; + public final String dockerPlatform; - Architecture(String classifier) { + Architecture(String classifier, 
String dockerPlatform) { this.classifier = classifier; + this.dockerPlatform = dockerPlatform; } public static Architecture current() { diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/ReaperService.java b/build-tools/src/main/java/org/elasticsearch/gradle/ReaperService.java index ece27cef7b66f..d63efbe3e55cb 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/ReaperService.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/ReaperService.java @@ -78,7 +78,7 @@ void shutdown() { logger.info("Waiting for reaper to exit normally"); if (reaperProcess.waitFor() != 0) { Path inputDir = getParameters().getInputDir().get().getAsFile().toPath(); - throw new GradleException("Reaper process failed. Check log at " + inputDir.resolve("error.log") + " for details"); + throw new GradleException("Reaper process failed. Check log at " + inputDir.resolve("reaper.log") + " for details"); } } catch (Exception e) { throw new RuntimeException(e); @@ -109,7 +109,7 @@ private synchronized void ensureReaperStarted() { builder.redirectInput(ProcessBuilder.Redirect.PIPE); File logFile = logFilePath().toFile(); builder.redirectOutput(logFile); - builder.redirectError(logFile); + builder.redirectErrorStream(); reaperProcess = builder.start(); } catch (Exception e) { throw new RuntimeException(e); diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java index fcc4640ae43ca..bca06c302d2a5 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java @@ -62,6 +62,8 @@ import java.io.IOException; import java.io.InputStream; import java.io.LineNumberReader; +import java.io.PrintWriter; +import java.io.StringWriter; import java.io.UncheckedIOException; import java.net.URL; import java.nio.charset.StandardCharsets; @@ -489,6 +491,13 @@ public void freeze() { configurationFrozen.set(true); } + private static String throwableToString(Throwable t) { + StringWriter sw = new StringWriter(); + PrintWriter pw = new PrintWriter(sw); + t.printStackTrace(pw); + return sw.toString(); + } + @Override public synchronized void start() { LOGGER.info("Starting `{}`", this); @@ -505,11 +514,9 @@ public synchronized void start() { // make sure we always start fresh if (Files.exists(workingDir)) { if (preserveDataDir) { - Files.list(workingDir) - .filter(path -> path.equals(confPathData) == false) - .forEach(path -> fileSystemOperations.delete(d -> d.delete(path))); + Files.list(workingDir).filter(path -> path.equals(confPathData) == false).forEach(this::uncheckedDeleteWithRetry); } else { - fileSystemOperations.delete(d -> d.delete(workingDir)); + deleteWithRetry(workingDir); } } isWorkingDirConfigured = true; @@ -517,7 +524,13 @@ public synchronized void start() { setupNodeDistribution(getExtractedDistributionDir()); createWorkingDir(); } catch (IOException e) { - throw new UncheckedIOException("Failed to create working directory for " + this, e); + String msg = "Failed to create working directory for " + this + ", with: " + e + throwableToString(e); + logToProcessStdout(msg); + throw new UncheckedIOException(msg, e); + } catch (org.gradle.api.UncheckedIOException e) { + String msg = "Failed to create working directory for " + this + ", with: " + e + throwableToString(e); + logToProcessStdout(msg); + throw e; } copyExtraJars(); @@ -1192,9 
+1205,75 @@ private void waitForProcessToExit(ProcessHandle processHandle) { } } + private static final int RETRY_DELETE_MILLIS = OS.current() == OS.WINDOWS ? 500 : 0; + private static final int MAX_RETRY_DELETE_TIMES = OS.current() == OS.WINDOWS ? 15 : 0; + + /** + * Deletes a path, retrying if necessary. + * + * @param path the path to delete + * @throws IOException + * if an I/O error occurs + */ + void deleteWithRetry(Path path) throws IOException { + try { + deleteWithRetry0(path); + } catch (InterruptedException x) { + throw new IOException("Interrupted while deleting.", x); + } + } + + /** Unchecked variant of deleteWithRetry. */ + void uncheckedDeleteWithRetry(Path path) { + try { + deleteWithRetry0(path); + } catch (IOException e) { + throw new UncheckedIOException(e); + } catch (InterruptedException x) { + throw new UncheckedIOException("Interrupted while deleting.", new IOException()); + } + } + + // The exception handling here is loathsome, but necessary! + private void deleteWithRetry0(Path path) throws IOException, InterruptedException { + int times = 0; + IOException ioe = null; + while (true) { + try { + fileSystemOperations.delete(d -> d.delete(path)); + times++; + // Checks for absence of the file. Semantics of Files.exists() is not the same. + while (Files.notExists(path) == false) { + if (times > MAX_RETRY_DELETE_TIMES) { + throw new IOException("File still exists after " + times + " waits."); + } + Thread.sleep(RETRY_DELETE_MILLIS); + // retry + fileSystemOperations.delete(d -> d.delete(path)); + times++; + } + break; + } catch (NoSuchFileException ignore) { + // already deleted, ignore + break; + } catch (org.gradle.api.UncheckedIOException | IOException x) { + if (x.getCause() instanceof NoSuchFileException) { + // already deleted, ignore + break; + } + // Backoff/retry in case another process is accessing the file + times++; + if (ioe == null) ioe = new IOException(); + ioe.addSuppressed(x); + if (times > MAX_RETRY_DELETE_TIMES) throw ioe; + Thread.sleep(RETRY_DELETE_MILLIS); + } + } + } + private void createWorkingDir() throws IOException { // Start configuration from scratch in case of a restart - fileSystemOperations.delete(d -> d.delete(configFile.getParent())); + deleteWithRetry(configFile.getParent()); Files.createDirectories(configFile.getParent()); Files.createDirectories(confPathRepo); Files.createDirectories(confPathData); diff --git a/build.gradle b/build.gradle index 9a979f0cdf5f3..8c4df1b287e36 100644 --- a/build.gradle +++ b/build.gradle @@ -6,28 +6,26 @@ * Side Public License, v 1. 
*/ + +import de.thetaphi.forbiddenapis.gradle.ForbiddenApisPlugin import com.avast.gradle.dockercompose.tasks.ComposePull import com.fasterxml.jackson.databind.JsonNode import com.fasterxml.jackson.databind.ObjectMapper -import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin -import de.thetaphi.forbiddenapis.gradle.ForbiddenApisPlugin -import org.elasticsearch.gradle.internal.BuildPlugin + import org.elasticsearch.gradle.Version -import org.elasticsearch.gradle.VersionProperties -import org.elasticsearch.gradle.internal.BwcVersions +import org.elasticsearch.gradle.internal.BaseInternalPluginBuildPlugin +import org.elasticsearch.gradle.internal.ResolveAllDependencies import org.elasticsearch.gradle.internal.info.BuildParams -import org.elasticsearch.gradle.plugin.PluginBuildPlugin +import org.elasticsearch.gradle.util.GradleUtils import org.gradle.plugins.ide.eclipse.model.AccessRule +import org.gradle.plugins.ide.eclipse.model.ProjectDependency import org.gradle.util.DistributionLocator import org.gradle.util.GradleVersion -import org.elasticsearch.gradle.util.GradleUtils -import static org.elasticsearch.gradle.util.GradleUtils.maybeConfigure -import org.gradle.plugins.ide.eclipse.model.ProjectDependency -import org.elasticsearch.gradle.internal.BaseInternalPluginBuildPlugin -import org.elasticsearch.gradle.internal.ResolveAllDependencies import java.nio.file.Files + import static java.nio.file.StandardCopyOption.REPLACE_EXISTING +import static org.elasticsearch.gradle.util.GradleUtils.maybeConfigure plugins { id 'lifecycle-base' @@ -208,14 +206,15 @@ allprojects { } } - def checkPart1 = tasks.register('checkPart1') - def checkPart2 = tasks.register('checkPart2') - def checkPart3 = tasks.register('checkPart3') plugins.withId('lifecycle-base') { if (project.path.startsWith(":x-pack:")) { - checkPart2.configure { dependsOn 'check' } + if (project.path.contains("security") || project.path.contains(":ml")) { + tasks.register('checkPart3') { dependsOn 'check' } + } else { + tasks.register('checkPart2') { dependsOn 'check' } + } } else { - checkPart1.configure { dependsOn 'check' } + tasks.register('checkPart1') { dependsOn 'check' } } } diff --git a/distribution/docker/build.gradle b/distribution/docker/build.gradle index a3be272a09b0c..6459ef42eb173 100644 --- a/distribution/docker/build.gradle +++ b/distribution/docker/build.gradle @@ -4,9 +4,12 @@ import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.internal.DockerBase import org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes import org.elasticsearch.gradle.internal.docker.DockerBuildTask +import org.elasticsearch.gradle.internal.docker.DockerSupportPlugin +import org.elasticsearch.gradle.internal.docker.DockerSupportService import org.elasticsearch.gradle.internal.docker.ShellRetry import org.elasticsearch.gradle.internal.docker.TransformLog4jConfigFilter import org.elasticsearch.gradle.internal.info.BuildParams +import org.elasticsearch.gradle.util.GradleUtils import java.nio.file.Path import java.time.temporal.ChronoUnit @@ -35,15 +38,22 @@ repositories { // Cloud builds bundle some beats ivy { + name = 'beats' if (useLocalArtifacts) { url "file://${buildDir}/artifacts/" patternLayout { - artifact '/[organisation]/[module]-[revision]-linux-[classifier].[ext]' + artifact '/[organisation]/[module]-[revision]-[classifier].[ext]' } } else { - url "https://${VersionProperties.isElasticsearchSnapshot() ? 
'snapshots' : 'artifacts'}-no-kpi.elastic.co/" + url "https://artifacts-snapshot.elastic.co/" patternLayout { - artifact '/downloads/[organization]/[module]/[module]-[revision]-linux-[classifier].[ext]' + if (VersionProperties.isElasticsearchSnapshot()) { + artifact '/[organization]/[revision]/downloads/[organization]/[module]/[module]-[revision]-[classifier].[ext]' + } else { + // When building locally we always use snapshot artifacts even if passing `-Dbuild.snapshot=false`. + // Release builds are always done with a local repo. + artifact '/[organization]/[revision]-SNAPSHOT/downloads/[organization]/[module]/[module]-[revision]-SNAPSHOT-[classifier].[ext]' + } } } metadataSources { artifact() } @@ -72,8 +82,8 @@ dependencies { log4jConfig project(path: ":distribution", configuration: 'log4jConfig') tini "krallin:tini:0.19.0:${tiniArch}" allPlugins project(path: ':plugins', configuration: 'allPlugins') - filebeat "beats:filebeat:${VersionProperties.elasticsearch}:${beatsArch}@tar.gz" - metricbeat "beats:metricbeat:${VersionProperties.elasticsearch}:${beatsArch}@tar.gz" + filebeat "beats:filebeat:${VersionProperties.elasticsearch}:linux-${beatsArch}@tar.gz" + metricbeat "beats:metricbeat:${VersionProperties.elasticsearch}:linux-${beatsArch}@tar.gz" } ext.expansions = { Architecture architecture, DockerBase base -> @@ -264,7 +274,7 @@ void addBuildDockerContextTask(Architecture architecture, DockerBase base) { rename ~/((?:file|metric)beat)-.*\.tar\.gz$/, "\$1-${VersionProperties.elasticsearch}.tar.gz" } - onlyIf { Architecture.current() == architecture } + onlyIf { isArchitectureSupported(architecture) } } if (base == DockerBase.IRON_BANK) { @@ -311,12 +321,12 @@ void addTransformDockerContextTask(Architecture architecture, DockerBase base) { inputs.property(k, { v.toString() }) } - onlyIf { Architecture.current() == architecture } + onlyIf { isArchitectureSupported(architecture) } } } -private static List generateTags(DockerBase base) { +private static List generateTags(DockerBase base, Architecture architecture) { final String version = VersionProperties.elasticsearch String image = "elasticsearch${base.suffix}" @@ -326,11 +336,13 @@ private static List generateTags(DockerBase base) { namespace += '-ci' } - return [ - "${image}:test", - "${image}:${version}", - "docker.elastic.co/${namespace}/${image}:${version}" - ] + def tags = ["${image}:${architecture.classifier}"] + + if (architecture == Architecture.current()) { + tags.addAll(["${image}:test", "${image}:${version}", "docker.elastic.co/${namespace}/${image}:${version}"]) + } + + return tags } void addBuildDockerImageTask(Architecture architecture, DockerBase base) { @@ -343,7 +355,8 @@ void addBuildDockerImageTask(Architecture architecture, DockerBase base) { dockerContext.fileProvider(transformTask.map { Sync task -> task.getDestinationDir() }) noCache = BuildParams.isCi - tags = generateTags(base) + tags = generateTags(base, architecture) + platform = architecture.dockerPlatform // We don't build the Iron Bank image when we release Elasticsearch, as there's // separate process for submitting new releases. 
However, for testing we do a @@ -368,7 +381,7 @@ void addBuildDockerImageTask(Architecture architecture, DockerBase base) { baseImages = [base.image] } - onlyIf { Architecture.current() == architecture } + onlyIf { isArchitectureSupported(architecture) } } if (base != DockerBase.IRON_BANK && base != DockerBase.CLOUD && base != DockerBase.CLOUD_ESS) { @@ -395,7 +408,7 @@ void addBuildEssDockerImageTask(Architecture architecture) { from(projectDir.resolve("src/docker/Dockerfile.cloud-ess")) { expand([ - base_image: "elasticsearch${DockerBase.CLOUD.suffix}:${VersionProperties.elasticsearch}" + base_image: "elasticsearch${DockerBase.CLOUD.suffix}:${architecture.classifier}" ]) filter SquashNewlinesFilter rename ~/Dockerfile\.cloud-ess$/, 'Dockerfile' @@ -412,9 +425,10 @@ void addBuildEssDockerImageTask(Architecture architecture) { noCache = BuildParams.isCi baseImages = [] - tags = generateTags(base) + tags = generateTags(base, architecture) + platform = architecture.dockerPlatform - onlyIf { Architecture.current() == architecture } + onlyIf { isArchitectureSupported(architecture) } } tasks.named("assemble").configure { @@ -435,6 +449,11 @@ for (final Architecture architecture : Architecture.values()) { addBuildEssDockerImageTask(architecture) } +boolean isArchitectureSupported(Architecture architecture) { + Provider serviceProvider = GradleUtils.getBuildService(project.gradle.sharedServices, DockerSupportPlugin.DOCKER_SUPPORT_SERVICE_NAME) + return serviceProvider.get().dockerAvailability.supportedArchitectures().contains(architecture) +} + /* * The export subprojects write out the generated Docker images to disk, so * that they can be easily reloaded, for example into a VM for distribution testing @@ -474,10 +493,10 @@ subprojects { Project subProject -> args "save", "-o", tarFile, - "elasticsearch${base.suffix}:test" + "elasticsearch${base.suffix}:${architecture.classifier}" dependsOn(parent.path + ":" + buildTaskName) - onlyIf { Architecture.current() == architecture } + onlyIf { isArchitectureSupported(architecture) } } artifacts.add('default', file(tarFile)) { @@ -487,3 +506,8 @@ subprojects { Project subProject -> } } } + +tasks.named('resolveAllDependencies') { + // Don't try and resolve filebeat or metricbeat snapshots as they may not always be available + configs = configurations.matching { it.name.endsWith('beat') == false } +} diff --git a/distribution/docker/ironbank-aarch64-docker-export/build.gradle b/distribution/docker/ironbank-docker-aarch64-export/build.gradle similarity index 100% rename from distribution/docker/ironbank-aarch64-docker-export/build.gradle rename to distribution/docker/ironbank-docker-aarch64-export/build.gradle diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerProcess.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerProcess.java index 3c03630b6dd40..23bbd93a4a250 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerProcess.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerProcess.java @@ -220,6 +220,7 @@ private static Process createProcess( command.addAll(jvmOptions); command.add("--module-path"); command.add(esHome.resolve("lib").toString()); + command.add("--add-modules=jdk.net"); // very special circumstance; explicit modules should typically not be added here command.add("-m"); command.add("org.elasticsearch.server/org.elasticsearch.bootstrap.Elasticsearch"); diff --git 
a/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/ProcrunCommand.java b/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/ProcrunCommand.java index c10495d3b8af6..b507e5e43a456 100644 --- a/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/ProcrunCommand.java +++ b/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/ProcrunCommand.java @@ -67,7 +67,7 @@ protected void execute(Terminal terminal, OptionSet options, ProcessInfo process preExecute(terminal, processInfo, serviceId); List procrunCmd = new ArrayList<>(); - procrunCmd.add(procrun.toString()); + procrunCmd.add(quote(procrun.toString())); procrunCmd.add("//%s/%s".formatted(cmd, serviceId)); if (includeLogArgs()) { procrunCmd.add(getLogArgs(serviceId, processInfo.workingDir(), processInfo.envVars())); @@ -86,6 +86,11 @@ protected void execute(Terminal terminal, OptionSet options, ProcessInfo process } } + /** Quotes the given String. */ + static String quote(String s) { + return '"' + s + '"'; + } + /** Determines the service id for the Elasticsearch service that should be used */ private String getServiceId(OptionSet options, Map env) throws UserException { List args = options.nonOptionArguments(); diff --git a/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/WindowsServiceInstallCommand.java b/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/WindowsServiceInstallCommand.java index 4e6e2cddfeb93..0d0bd040db30a 100644 --- a/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/WindowsServiceInstallCommand.java +++ b/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/WindowsServiceInstallCommand.java @@ -42,7 +42,7 @@ protected String getAdditionalArgs(String serviceId, ProcessInfo pinfo) { addArg(args, "--Classpath", pinfo.sysprops().get("java.class.path")); addArg(args, "--JvmMs", "4m"); addArg(args, "--JvmMx", "64m"); - addArg(args, "--JvmOptions", getJvmOptions(pinfo.sysprops())); + addQuotedArg(args, "--JvmOptions", getJvmOptions(pinfo.sysprops())); addArg(args, "--PidFile", "%s.pid".formatted(serviceId)); addArg( args, @@ -55,10 +55,10 @@ protected String getAdditionalArgs(String serviceId, ProcessInfo pinfo) { pinfo.envVars() .getOrDefault("SERVICE_DESCRIPTION", "Elasticsearch %s Windows Service - https://elastic.co".formatted(Version.CURRENT)) ); - addArg(args, "--Jvm", getJvmDll(getJavaHome(pinfo.sysprops())).toString()); + addQuotedArg(args, "--Jvm", quote(getJvmDll(getJavaHome(pinfo.sysprops())).toString())); addArg(args, "--StartMode", "jvm"); addArg(args, "--StopMode", "jvm"); - addArg(args, "--StartPath", pinfo.workingDir().toString()); + addQuotedArg(args, "--StartPath", quote(pinfo.workingDir().toString())); addArg(args, "++JvmOptions", "-Dcli.name=windows-service-daemon"); addArg(args, "++JvmOptions", "-Dcli.libs=lib/tools/server-cli,lib/tools/windows-service-cli"); addArg(args, "++Environment", "HOSTNAME=%s".formatted(pinfo.envVars().get("COMPUTERNAME"))); @@ -89,6 +89,13 @@ private static void addArg(List args, String arg, String value) { args.add(value); } + // Adds an arg with an already appropriately quoted value. Trivial, but explicit implementation. 
+ // This method is typically used when adding args whose value contains a file-system path + private static void addQuotedArg(List args, String arg, String value) { + args.add(arg); + args.add(value); + } + @SuppressForbidden(reason = "get java home path to pass through") private static Path getJavaHome(Map sysprops) { return Paths.get(sysprops.get("java.home")); @@ -107,7 +114,7 @@ private static String getJvmOptions(Map sysprops) { jvmOptions.add("-XX:+UseSerialGC"); // passthrough these properties for (var prop : List.of("es.path.home", "es.path.conf", "es.distribution.type")) { - jvmOptions.add("-D%s=%s".formatted(prop, sysprops.get(prop))); + jvmOptions.add("-D%s=%s".formatted(prop, quote(sysprops.get(prop)))); } return String.join(";", jvmOptions); } diff --git a/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/ProcrunCommandTests.java b/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/ProcrunCommandTests.java index b683884a37571..e4b651fcb77af 100644 --- a/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/ProcrunCommandTests.java +++ b/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/ProcrunCommandTests.java @@ -25,6 +25,10 @@ public class ProcrunCommandTests extends WindowsServiceCliTestCase { + public ProcrunCommandTests(boolean spaceInPath) { + super(spaceInPath); + } + PreExecuteHook preExecuteHook; boolean includeLogArgs; String additionalArgs; diff --git a/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceCliTestCase.java b/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceCliTestCase.java index b727774ea2d1d..808173005b96f 100644 --- a/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceCliTestCase.java +++ b/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceCliTestCase.java @@ -8,6 +8,8 @@ package org.elasticsearch.windows.service; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.elasticsearch.cli.CommandTestCase; import org.junit.Before; @@ -47,6 +49,15 @@ public abstract class WindowsServiceCliTestCase extends CommandTestCase { int mockProcessExit = 0; ProcessValidator mockProcessValidator = null; + @ParametersFactory + public static Iterable spaceInPathProvider() { + return List.of(new Object[] { true }, new Object[] { false }); + } + + protected WindowsServiceCliTestCase(boolean spaceInPath) { + super(spaceInPath); + } + interface ProcessValidator { void validate(Map env, ProcrunCall procrunCall); } @@ -106,16 +117,22 @@ protected Process mockProcess(ProcessBuilder processBuilder) throws IOException private static final Pattern commandPattern = Pattern.compile("//([A-Z]{2})/([\\w-]+)"); private static ProcrunCall parseProcrunCall(String unparsedArgs) { + // command/exe is quoted + assert unparsedArgs.charAt(0) == '"'; + int idx = unparsedArgs.indexOf('"', 1); + String exe = unparsedArgs.substring(0, idx + 1); + // Strip the leading command/exe from the args + unparsedArgs = unparsedArgs.substring(idx + 1).stripLeading(); + String[] splitArgs = unparsedArgs.split(" "); - assertThat(unparsedArgs, splitArgs.length, greaterThanOrEqualTo(2)); + assertThat(unparsedArgs, splitArgs.length, greaterThanOrEqualTo(1)); Map> args = new HashMap<>(); - String exe = splitArgs[0]; - Matcher 
commandMatcher = commandPattern.matcher(splitArgs[1]); - assertThat(splitArgs[1], commandMatcher.matches(), is(true)); + Matcher commandMatcher = commandPattern.matcher(splitArgs[0]); + assertThat(splitArgs[0], commandMatcher.matches(), is(true)); String command = commandMatcher.group(1); String serviceId = commandMatcher.group(2); - int i = 2; + int i = 1; while (i < splitArgs.length) { String arg = splitArgs[i]; assertThat("procrun args begin with -- or ++", arg, anyOf(startsWith("--"), startsWith("++"))); @@ -165,8 +182,12 @@ public void resetMockProcess() throws Exception { protected abstract String getDefaultFailureMessage(); + static String quote(String s) { + return '"' + s + '"'; + } + protected String getExe() { - return serviceExe.toString(); + return quote(serviceExe.toString()); } protected boolean includeLogsArgs() { diff --git a/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceInstallCommandTests.java b/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceInstallCommandTests.java index ffd0e16fd6f79..0db531074498f 100644 --- a/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceInstallCommandTests.java +++ b/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceInstallCommandTests.java @@ -31,6 +31,10 @@ public class WindowsServiceInstallCommandTests extends WindowsServiceCliTestCase Path jvmDll; + public WindowsServiceInstallCommandTests(boolean spaceInPath) { + super(spaceInPath); + } + @Before public void setupJvm() throws Exception { jvmDll = javaHome.resolve("jre/bin/server/jvm.dll"); @@ -80,7 +84,7 @@ public void testAlternateDllLocation() throws Exception { } public void testDll() throws Exception { - assertServiceArgs(Map.of("Jvm", jvmDll.toString())); + assertServiceArgs(Map.of("Jvm", quote(jvmDll.toString()))); } public void testPreExecuteOutput() throws Exception { @@ -95,9 +99,9 @@ public void testJvmOptions() throws Exception { sysprops.put("es.distribution.type", "testdistro"); List expectedOptions = List.of( "" + "-XX:+UseSerialGC", - "-Des.path.home=" + esHomeDir.toString(), - "-Des.path.conf=" + esHomeDir.resolve("config").toString(), - "-Des.distribution.type=testdistro" + "-Des.path.home=" + quote(esHomeDir.toString()), + "-Des.path.conf=" + quote(esHomeDir.resolve("config").toString()), + "-Des.distribution.type=" + quote("testdistro") ); mockProcessValidator = (environment, procrunCall) -> { List options = procrunCall.args().get("JvmOptions"); @@ -136,7 +140,7 @@ public void testFixedArgs() throws Exception { entry("StopMode", "jvm"), entry("JvmMs", "4m"), entry("JvmMx", "64m"), - entry("StartPath", esHomeDir.toString()), + entry("StartPath", quote(esHomeDir.toString())), entry("Classpath", "javaclasspath") // dummy value for tests ) ); diff --git a/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceManagerCommandTests.java b/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceManagerCommandTests.java index cd3aea949f0f6..1699dd3f78316 100644 --- a/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceManagerCommandTests.java +++ b/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceManagerCommandTests.java @@ -13,6 +13,11 @@ import java.io.IOException; public class 
WindowsServiceManagerCommandTests extends WindowsServiceCliTestCase { + + public WindowsServiceManagerCommandTests(boolean spaceInPath) { + super(spaceInPath); + } + @Override protected Command newCommand() { return new WindowsServiceManagerCommand() { @@ -25,7 +30,7 @@ Process startProcess(ProcessBuilder processBuilder) throws IOException { @Override protected String getExe() { - return mgrExe.toString(); + return quote(mgrExe.toString()); } @Override diff --git a/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceRemoveCommandTests.java b/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceRemoveCommandTests.java index 3d2032d75a195..d0e72e9de5c66 100644 --- a/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceRemoveCommandTests.java +++ b/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceRemoveCommandTests.java @@ -13,6 +13,11 @@ import java.io.IOException; public class WindowsServiceRemoveCommandTests extends WindowsServiceCliTestCase { + + public WindowsServiceRemoveCommandTests(boolean spaceInPath) { + super(spaceInPath); + } + @Override protected Command newCommand() { return new WindowsServiceRemoveCommand() { diff --git a/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceStartCommandTests.java b/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceStartCommandTests.java index 7a30540d53ba0..502008d22422f 100644 --- a/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceStartCommandTests.java +++ b/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceStartCommandTests.java @@ -13,6 +13,11 @@ import java.io.IOException; public class WindowsServiceStartCommandTests extends WindowsServiceCliTestCase { + + public WindowsServiceStartCommandTests(boolean spaceInPath) { + super(spaceInPath); + } + @Override protected Command newCommand() { return new WindowsServiceStartCommand() { diff --git a/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceStopCommandTests.java b/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceStopCommandTests.java index f623c5d2465f3..a36e090bd7ac4 100644 --- a/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceStopCommandTests.java +++ b/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceStopCommandTests.java @@ -13,6 +13,11 @@ import java.io.IOException; public class WindowsServiceStopCommandTests extends WindowsServiceCliTestCase { + + public WindowsServiceStopCommandTests(boolean spaceInPath) { + super(spaceInPath); + } + @Override protected Command newCommand() { return new WindowsServiceStopCommand() { diff --git a/docs/changelog/81162.yaml b/docs/changelog/81162.yaml deleted file mode 100644 index ca3b9379f8591..0000000000000 --- a/docs/changelog/81162.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 81162 -summary: Stop making index read-only when executing force merge index lifecycle management action -area: Infra/Core -type: enhancement -issues: - - 81162 diff --git a/docs/changelog/81322.yaml b/docs/changelog/81322.yaml deleted file mode 100644 index 8ebcdd39264fd..0000000000000 
--- a/docs/changelog/81322.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 81322 -summary: Speed counting filters/range/date_histogram aggs -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/83055.yaml b/docs/changelog/83055.yaml deleted file mode 100644 index ce00cf31d2b89..0000000000000 --- a/docs/changelog/83055.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 83055 -summary: New `frequent_items` aggregation -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/83345.yaml b/docs/changelog/83345.yaml deleted file mode 100644 index 570dc85b319e2..0000000000000 --- a/docs/changelog/83345.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 83345 -summary: Add min_* conditions to rollover -area: ILM+SLM -type: enhancement -issues: [] diff --git a/docs/changelog/85455.yaml b/docs/changelog/85455.yaml deleted file mode 100644 index cc1117d58f489..0000000000000 --- a/docs/changelog/85455.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 85455 -summary: App permissions with action patterns do not retrieve privileges -area: Authorization -type: enhancement -issues: [] diff --git a/docs/changelog/85688.yaml b/docs/changelog/85688.yaml deleted file mode 100644 index 1232dacd1a1d0..0000000000000 --- a/docs/changelog/85688.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 85688 -summary: Speed up `NumberFieldMapper` -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/85729.yaml b/docs/changelog/85729.yaml deleted file mode 100644 index 291475dd67b96..0000000000000 --- a/docs/changelog/85729.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 85729 -summary: Update bucket metric pipeline agg paths to allow intermediate single bucket - and bucket qualified multi-bucket aggs -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/86132.yaml b/docs/changelog/86132.yaml deleted file mode 100644 index ec4e97cc0b751..0000000000000 --- a/docs/changelog/86132.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 86132 -summary: Add debug information to `ReactiveReason` about assigned and unassigned shards -area: Allocation -type: enhancement -issues: - - 85243 diff --git a/docs/changelog/86514.yaml b/docs/changelog/86514.yaml deleted file mode 100644 index 69b1fa9e68802..0000000000000 --- a/docs/changelog/86514.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 86514 -summary: Make snapshot deletes not block the repository during data blob deletes -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/86524.yaml b/docs/changelog/86524.yaml deleted file mode 100644 index 35652fffcd74e..0000000000000 --- a/docs/changelog/86524.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 86524 -summary: Master stability health indicator part 1 (when a master has been seen recently) -area: Health -type: feature -issues: [] diff --git a/docs/changelog/86612.yaml b/docs/changelog/86612.yaml deleted file mode 100644 index 2a4386ab29050..0000000000000 --- a/docs/changelog/86612.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 86612 -summary: Stable logging API - the basic use case -area: Infra/Logging -type: feature -issues: [] diff --git a/docs/changelog/86630.yaml b/docs/changelog/86630.yaml deleted file mode 100644 index cd3ce6db4c490..0000000000000 --- a/docs/changelog/86630.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 86630 -summary: Upgrade to Netty 4.1.77 -area: Network -type: upgrade -issues: [] diff --git a/docs/changelog/86740.yaml b/docs/changelog/86740.yaml deleted file mode 100644 index 35002561d31b7..0000000000000 --- a/docs/changelog/86740.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 86740 
-summary: Log warning when hash function used by cache is not recommended in FIPS mode -area: FIPS -type: enhancement -issues: [] diff --git a/docs/changelog/86759.yaml b/docs/changelog/86759.yaml deleted file mode 100644 index a0514eed12a79..0000000000000 --- a/docs/changelog/86759.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 86759 -summary: Batch ILM move to retry step task update -area: ILM+SLM -type: bug -issues: [] diff --git a/docs/changelog/86838.yaml b/docs/changelog/86838.yaml deleted file mode 100644 index 77e1e8b2f8956..0000000000000 --- a/docs/changelog/86838.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 86838 -summary: Adding cardinality support for `random_sampler` agg -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/86859.yaml b/docs/changelog/86859.yaml deleted file mode 100644 index 031ca7705d80a..0000000000000 --- a/docs/changelog/86859.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 86859 -summary: Add mapping stats for indexed `dense_vectors` -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/86935.yaml b/docs/changelog/86935.yaml deleted file mode 100644 index 1d76071657927..0000000000000 --- a/docs/changelog/86935.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 86935 -summary: Minor `RangeAgg` optimization -area: Aggregations -type: enhancement -issues: - - 84262 diff --git a/docs/changelog/86982.yaml b/docs/changelog/86982.yaml deleted file mode 100644 index 822ec785c2c25..0000000000000 --- a/docs/changelog/86982.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 86982 -summary: Implement support for partial search results in SQL CLI -area: SQL -type: enhancement -issues: - - 86082 diff --git a/docs/changelog/87091.yaml b/docs/changelog/87091.yaml deleted file mode 100644 index d9c393dc892e0..0000000000000 --- a/docs/changelog/87091.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87091 -summary: Expand allowed NER labels to be any I-O-B tagged labels -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/87097.yaml b/docs/changelog/87097.yaml deleted file mode 100644 index 87eb0c1f143e1..0000000000000 --- a/docs/changelog/87097.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87097 -summary: User Profile - audit support for security domain -area: Audit -type: enhancement -issues: [] diff --git a/docs/changelog/87100.yaml b/docs/changelog/87100.yaml deleted file mode 100644 index 42a3ff29b3438..0000000000000 --- a/docs/changelog/87100.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87100 -summary: Give doc-value-only mappings to numeric fields on metrics templates -area: Data streams -type: enhancement -issues: [] diff --git a/docs/changelog/87132.yaml b/docs/changelog/87132.yaml deleted file mode 100644 index 349fa62494885..0000000000000 --- a/docs/changelog/87132.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87132 -summary: Catch an exception when formatting a string fails -area: Infra/Logging -type: enhancement -issues: [] diff --git a/docs/changelog/87151.yaml b/docs/changelog/87151.yaml deleted file mode 100644 index 80bc9d5fffb39..0000000000000 --- a/docs/changelog/87151.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 87151 -summary: Fix date range checks -area: SQL -type: bug -issues: - - 77179 diff --git a/docs/changelog/87220.yaml b/docs/changelog/87220.yaml deleted file mode 100644 index bd2ce8d239e00..0000000000000 --- a/docs/changelog/87220.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 87220 -summary: Make the metric in the `buckets_path` parameter optional -area: Aggregations -type: bug -issues: - - 72983 diff --git a/docs/changelog/87224.yaml 
b/docs/changelog/87224.yaml deleted file mode 100644 index e66452b99d0f1..0000000000000 --- a/docs/changelog/87224.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87224 -summary: Cancellable Profile Has Privilege check -area: Authorization -type: enhancement -issues: [] diff --git a/docs/changelog/87229.yaml b/docs/changelog/87229.yaml deleted file mode 100644 index 445b5785e8856..0000000000000 --- a/docs/changelog/87229.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87229 -summary: Support exists query for API key query -area: Security -type: enhancement -issues: [] diff --git a/docs/changelog/87236.yaml b/docs/changelog/87236.yaml deleted file mode 100644 index 990346efbb376..0000000000000 --- a/docs/changelog/87236.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87236 -summary: JvmService use SingleObjectCache -area: Monitoring -type: enhancement -issues: [] diff --git a/docs/changelog/87260.yaml b/docs/changelog/87260.yaml deleted file mode 100644 index 92141a922522d..0000000000000 --- a/docs/changelog/87260.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87260 -summary: System indices ignore all user templates -area: Infra/Core -type: bug -issues: [42508,74271] diff --git a/docs/changelog/87269.yaml b/docs/changelog/87269.yaml deleted file mode 100644 index 1c401c7669ba6..0000000000000 --- a/docs/changelog/87269.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 87269 -summary: "TSDB: Implement downsampling ILM Action for time-series indices" -area: TSDB -type: feature -issues: - - 68609 diff --git a/docs/changelog/87298.yaml b/docs/changelog/87298.yaml deleted file mode 100644 index 981066a4487c2..0000000000000 --- a/docs/changelog/87298.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87298 -summary: Allow start cluster with unreachable remote clusters -area: Network -type: enhancement -issues: [] diff --git a/docs/changelog/87299.yaml b/docs/changelog/87299.yaml deleted file mode 100644 index dcb6c55d12f24..0000000000000 --- a/docs/changelog/87299.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87299 -summary: Using the correct connection to fetch remote master history -area: Health -type: bug -issues: [] diff --git a/docs/changelog/87306.yaml b/docs/changelog/87306.yaml deleted file mode 100644 index 456fd48b444b5..0000000000000 --- a/docs/changelog/87306.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87306 -summary: Adding a transport action to get cluster formation info -area: Health -type: enhancement -issues: [] diff --git a/docs/changelog/87309.yaml b/docs/changelog/87309.yaml deleted file mode 100644 index 61c1b28386702..0000000000000 --- a/docs/changelog/87309.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87309 -summary: "Script: Add Metadata to ingest context" -area: Infra/Scripting -type: enhancement -issues: [] diff --git a/docs/changelog/87338.yaml b/docs/changelog/87338.yaml deleted file mode 100644 index afd8560cb552b..0000000000000 --- a/docs/changelog/87338.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87338 -summary: Disallow three-digit minor and revision versions -area: Infra/Core -type: bug -issues: [] diff --git a/docs/changelog/87352.yaml b/docs/changelog/87352.yaml deleted file mode 100644 index e3fa123d52c6f..0000000000000 --- a/docs/changelog/87352.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 87352 -summary: Only perform `ensureNoSelfReferences` check during ingest when needed -area: Ingest -type: enhancement -issues: - - 87335 diff --git a/docs/changelog/87354.yaml b/docs/changelog/87354.yaml deleted file mode 100644 index 8bcef731203ad..0000000000000 --- a/docs/changelog/87354.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87354 -summary: 
Allow pipeline processor to ignore missing pipelines -area: Ingest -type: enhancement -issues: [] diff --git a/docs/changelog/87361.yaml b/docs/changelog/87361.yaml deleted file mode 100644 index fcca508ff249b..0000000000000 --- a/docs/changelog/87361.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 87361 -summary: "Implement per-transform num_failure_retries setting" -area: Transform -type: enhancement -issues: [] - diff --git a/docs/changelog/87363.yaml b/docs/changelog/87363.yaml deleted file mode 100644 index 2dd8bbf36f116..0000000000000 --- a/docs/changelog/87363.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87363 -summary: Log warning when hashers for stored API keys or service tokens are not compliant with FIPS -area: FIPS -type: enhancement -issues: [] diff --git a/docs/changelog/87366.yaml b/docs/changelog/87366.yaml deleted file mode 100644 index 0b2881e7c4778..0000000000000 --- a/docs/changelog/87366.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87366 -summary: Improve scalability of NLP models -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/87412.yaml b/docs/changelog/87412.yaml deleted file mode 100644 index 7b516edf35571..0000000000000 --- a/docs/changelog/87412.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87412 -summary: Expose segment details in PCSS debug log -area: Cluster Coordination -type: enhancement -issues: [] diff --git a/docs/changelog/87414.yaml b/docs/changelog/87414.yaml deleted file mode 100644 index 602b7b4b303eb..0000000000000 --- a/docs/changelog/87414.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 87414 -summary: Handle ordering in plain highlighter for multiple inputs -area: Highlighting -type: bug -issues: - - 87210 diff --git a/docs/changelog/87439.yaml b/docs/changelog/87439.yaml deleted file mode 100644 index 5dff7d9def686..0000000000000 --- a/docs/changelog/87439.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87439 -summary: Optimize geogrid aggregations for singleton points -area: Geo -type: enhancement -issues: [] diff --git a/docs/changelog/87461.yaml b/docs/changelog/87461.yaml deleted file mode 100644 index 7cbe41b89905a..0000000000000 --- a/docs/changelog/87461.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 87461 -summary: 'Return action denied error when user with insufficient privileges (`manage_own_api_key`) attempts a grant API key request' -area: Authorization -type: enhancement -issues: - - 87438 diff --git a/docs/changelog/87474.yaml b/docs/changelog/87474.yaml deleted file mode 100644 index 723223c91d066..0000000000000 --- a/docs/changelog/87474.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87474 -summary: Keep track of desired nodes status in cluster state -area: Autoscaling -type: enhancement -issues: [] diff --git a/docs/changelog/87482.yaml b/docs/changelog/87482.yaml deleted file mode 100644 index 52aa635e55f74..0000000000000 --- a/docs/changelog/87482.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87482 -summary: Adding additional capability to the `master_is_stable` health indicator service -area: Health -type: enhancement -issues: [] diff --git a/docs/changelog/87491.yaml b/docs/changelog/87491.yaml deleted file mode 100644 index c018d4eed3c16..0000000000000 --- a/docs/changelog/87491.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87491 -summary: Update version of internal http client -area: "Infra/Core" -type: enhancement -issues: [] diff --git a/docs/changelog/87498.yaml b/docs/changelog/87498.yaml deleted file mode 100644 index 1b58a5afbe07b..0000000000000 --- a/docs/changelog/87498.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87498 -summary: Avoid attempting 
PIT close on PIT open failure -area: EQL -type: bug -issues: [] diff --git a/docs/changelog/87505.yaml b/docs/changelog/87505.yaml deleted file mode 100644 index 98f3868bae14c..0000000000000 --- a/docs/changelog/87505.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 87505 -summary: Clamp auto-expand replicas to the closest value -area: Allocation -type: bug -issues: - - 84788 diff --git a/docs/changelog/87515.yaml b/docs/changelog/87515.yaml deleted file mode 100644 index 1161fa8cab389..0000000000000 --- a/docs/changelog/87515.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87515 -summary: Use a faster but less accurate log algorithm for computing Geotile Y coordinate -area: Geo -type: enhancement -issues: [] diff --git a/docs/changelog/87520.yaml b/docs/changelog/87520.yaml deleted file mode 100644 index 370c779761f18..0000000000000 --- a/docs/changelog/87520.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 87520 -summary: Remove cluster block preflight check from health api -area: Health -type: enhancement -issues: - - 87464 diff --git a/docs/changelog/87554.yaml b/docs/changelog/87554.yaml deleted file mode 100644 index 589e3ee616e2e..0000000000000 --- a/docs/changelog/87554.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87554 -summary: "[TSDB] Add Kahan support to downsampling summation" -area: "Rollup" -type: enhancement -issues: [] diff --git a/docs/changelog/87556.yaml b/docs/changelog/87556.yaml deleted file mode 100644 index 370b65cff3bd4..0000000000000 --- a/docs/changelog/87556.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87556 -summary: Report overall mapping size in cluster stats -area: Cluster Coordination -type: enhancement -issues: [] diff --git a/docs/changelog/87570.yaml b/docs/changelog/87570.yaml deleted file mode 100644 index 0b3215a5fcf5d..0000000000000 --- a/docs/changelog/87570.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87570 -summary: Add authorization info to transform config listings -area: Transform -type: enhancement -issues: [] diff --git a/docs/changelog/87590.yaml b/docs/changelog/87590.yaml deleted file mode 100644 index e03e0dccf2364..0000000000000 --- a/docs/changelog/87590.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 87590 -summary: Add support for VERSION field type in SQL and EQL -area: Query Languages -type: enhancement -issues: - - 83375 diff --git a/docs/changelog/87672.yaml b/docs/changelog/87672.yaml deleted file mode 100644 index e3cfa1ae1d625..0000000000000 --- a/docs/changelog/87672.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87672 -summary: Move the master stability logic into its own service separate from the `HealthIndicatorService` -area: Health -type: enhancement -issues: [] diff --git a/docs/changelog/87695.yaml b/docs/changelog/87695.yaml deleted file mode 100644 index 281283a6971cf..0000000000000 --- a/docs/changelog/87695.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87695 -summary: Do not include desired nodes in snapshots -area: Autoscaling -type: bug -issues: [] diff --git a/docs/changelog/87717.yaml b/docs/changelog/87717.yaml deleted file mode 100644 index 2e23ebbaf19fa..0000000000000 --- a/docs/changelog/87717.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 87717 -summary: Add rollover permissions for `remote_monitoring_agent` -area: Authorization -type: bug -issues: - - 84161 diff --git a/docs/changelog/87723.yaml b/docs/changelog/87723.yaml deleted file mode 100644 index 9027c17b74a6c..0000000000000 --- a/docs/changelog/87723.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87723 -summary: Optimize log cluster health performance. 
-area: Allocation -type: enhancement -issues: [] diff --git a/docs/changelog/87735.yaml b/docs/changelog/87735.yaml deleted file mode 100644 index 43f296f1411ee..0000000000000 --- a/docs/changelog/87735.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87735 -summary: Use desired nodes during data tier allocation decisions -area: Allocation -type: enhancement -issues: [] diff --git a/docs/changelog/87773.yaml b/docs/changelog/87773.yaml deleted file mode 100644 index 65b62e21a374f..0000000000000 --- a/docs/changelog/87773.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87773 -summary: Automatically close idle connections in OIDC back-channel -area: Security -type: enhancement -issues: [] diff --git a/docs/changelog/87778.yaml b/docs/changelog/87778.yaml deleted file mode 100644 index cd0a4edcf0c3e..0000000000000 --- a/docs/changelog/87778.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 87778 -summary: Make Desired Nodes API operator-only -area: Distributed -type: enhancement -issues: - - 87777 diff --git a/docs/changelog/87841.yaml b/docs/changelog/87841.yaml deleted file mode 100644 index 683e94d996229..0000000000000 --- a/docs/changelog/87841.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 87841 -summary: Remove any existing `read_only_allow_delete` index blocks when `cluster.routing.allocation.disk.threshold_enabled` is set to `false` -area: Allocation -type: bug -issues: - - 86383 diff --git a/docs/changelog/87868.yaml b/docs/changelog/87868.yaml deleted file mode 100644 index eba9ae0a27d03..0000000000000 --- a/docs/changelog/87868.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87868 -summary: Add setting for `tcp_keepalive` for oidc back-channel -area: Security -type: enhancement -issues: [] diff --git a/docs/changelog/87884.yaml b/docs/changelog/87884.yaml deleted file mode 100644 index 31c68e1c1643d..0000000000000 --- a/docs/changelog/87884.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87884 -summary: Add authorization info to ML config listings -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/87895.yaml b/docs/changelog/87895.yaml deleted file mode 100644 index 4cb3419ead676..0000000000000 --- a/docs/changelog/87895.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87895 -summary: Add processors to autoscaling capacity response -area: Autoscaling -type: enhancement -issues: [] diff --git a/docs/changelog/87942.yaml b/docs/changelog/87942.yaml deleted file mode 100644 index c9e619271c2a9..0000000000000 --- a/docs/changelog/87942.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87942 -summary: Improve console exception messages -area: Infra/Core -type: enhancement -issues: [] diff --git a/docs/changelog/87984.yaml b/docs/changelog/87984.yaml deleted file mode 100644 index 8f3a3e5f028eb..0000000000000 --- a/docs/changelog/87984.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87984 -summary: Creating a transport action for the `CoordinationDiagnosticsService` -area: Health -type: enhancement -issues: [] diff --git a/docs/changelog/87989.yaml b/docs/changelog/87989.yaml deleted file mode 100644 index 7796dabb66ce2..0000000000000 --- a/docs/changelog/87989.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87989 -summary: Move the ingest attachment processor to the default distribution -area: Ingest -type: enhancement -issues: [] diff --git a/docs/changelog/87999.yaml b/docs/changelog/87999.yaml deleted file mode 100644 index 9216f0a9b457c..0000000000000 --- a/docs/changelog/87999.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 87999 -summary: Fix unique realm name check to cover default realms -area: Authentication -type: bug -issues: [] diff 
--git a/docs/changelog/88013.yaml b/docs/changelog/88013.yaml deleted file mode 100644 index 3a4533728db70..0000000000000 --- a/docs/changelog/88013.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 88013 -summary: Periodic warning for 1-node cluster w/ seed hosts -area: Cluster Coordination -type: enhancement -issues: - - 85222 diff --git a/docs/changelog/88015.yaml b/docs/changelog/88015.yaml deleted file mode 100644 index d5b534c64690a..0000000000000 --- a/docs/changelog/88015.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 88015 -summary: Retry after all S3 get failures that made progress -area: Snapshot/Restore -type: enhancement -issues: - - 87243 diff --git a/docs/changelog/88023.yaml b/docs/changelog/88023.yaml deleted file mode 100644 index 94b00865601c0..0000000000000 --- a/docs/changelog/88023.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88023 -summary: "If signature validation fails, reload JWKs and retry if new JWKs are found" -area: Authentication -type: enhancement -issues: [] diff --git a/docs/changelog/88031.yaml b/docs/changelog/88031.yaml deleted file mode 100644 index d879b2bb16eed..0000000000000 --- a/docs/changelog/88031.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88031 -summary: Removing `BouncyCastle` dependencies from ingest-attachment plugin -area: Ingest -type: enhancement -issues: [] diff --git a/docs/changelog/88035.yaml b/docs/changelog/88035.yaml deleted file mode 100644 index 01152c930520a..0000000000000 --- a/docs/changelog/88035.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88035 -summary: Sort ingest pipeline stats by use -area: Stats -type: enhancement -issues: [] diff --git a/docs/changelog/88039.yaml b/docs/changelog/88039.yaml deleted file mode 100644 index c029db184a486..0000000000000 --- a/docs/changelog/88039.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88039 -summary: Update HDFS Repository to HDFS 3.3.3 -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/88127.yaml b/docs/changelog/88127.yaml deleted file mode 100644 index 21d7ce9c4dd0f..0000000000000 --- a/docs/changelog/88127.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88127 -summary: Stream input and output support for optional collections -area: Infra/Core -type: enhancement -issues: [] diff --git a/docs/changelog/88186.yaml b/docs/changelog/88186.yaml deleted file mode 100644 index f13b944126f69..0000000000000 --- a/docs/changelog/88186.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88186 -summary: Support updates of API key attributes (single operation route) -area: Authentication -type: feature -issues: [] diff --git a/docs/changelog/88187.yaml b/docs/changelog/88187.yaml deleted file mode 100644 index 17067c06c5d3c..0000000000000 --- a/docs/changelog/88187.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88187 -summary: Add deployed native models to `inference_stats` in trained model stats response -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/88211.yaml b/docs/changelog/88211.yaml deleted file mode 100644 index 48fdff8501ad9..0000000000000 --- a/docs/changelog/88211.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88211 -summary: Add 'mode' option to `_source` field mapper -area: Search -type: feature -issues: [] diff --git a/docs/changelog/88221.yaml b/docs/changelog/88221.yaml deleted file mode 100644 index 279133075c519..0000000000000 --- a/docs/changelog/88221.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 88221 -summary: Propagate alias filters to significance aggs filters -area: Aggregations -type: bug -issues: - - 81585 diff --git a/docs/changelog/88231.yaml b/docs/changelog/88231.yaml 
deleted file mode 100644 index a4958b58805aa..0000000000000 --- a/docs/changelog/88231.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88231 -summary: Use faster maths to project WGS84 to mercator -area: Geo -type: enhancement -issues: [] diff --git a/docs/changelog/88237.yaml b/docs/changelog/88237.yaml deleted file mode 100644 index 8e9e860905c2a..0000000000000 --- a/docs/changelog/88237.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88237 -summary: Upgrade to Log4J 2.18.0 -area: Infra/Core -type: upgrade -issues: [] diff --git a/docs/changelog/88257.yaml b/docs/changelog/88257.yaml deleted file mode 100644 index feb5df85feafb..0000000000000 --- a/docs/changelog/88257.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 88257 -summary: INFO logging of snapshot restore and completion -area: Snapshot/Restore -type: enhancement -issues: - - 86610 diff --git a/docs/changelog/88260.yaml b/docs/changelog/88260.yaml deleted file mode 100644 index be9e96bfc4e54..0000000000000 --- a/docs/changelog/88260.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88260 -summary: "[Stack Monitoring] Switch cgroup memory fields to keyword" -area: Monitoring -type: bug -issues: [] diff --git a/docs/changelog/88270.yaml b/docs/changelog/88270.yaml deleted file mode 100644 index e8705a5be1606..0000000000000 --- a/docs/changelog/88270.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88270 -summary: Updatable API keys - REST API spec and tests -area: Security -type: enhancement -issues: [] diff --git a/docs/changelog/88271.yaml b/docs/changelog/88271.yaml deleted file mode 100644 index 7a34da66206f7..0000000000000 --- a/docs/changelog/88271.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 88271 -summary: "Fix: extract matrix stats using `bucket_selector` `buckets_path`" -area: Aggregations -type: bug -issues: - - 87454 diff --git a/docs/changelog/88273.yaml b/docs/changelog/88273.yaml deleted file mode 100644 index 86a9e51a17865..0000000000000 --- a/docs/changelog/88273.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 88273 -summary: Fix potential circuit breaker leak on `InternalGeoGrid` -area: Geo -type: bug -issues: - - 88261 diff --git a/docs/changelog/88276.yaml b/docs/changelog/88276.yaml deleted file mode 100644 index 942d83375b361..0000000000000 --- a/docs/changelog/88276.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88276 -summary: Updatable API keys - logging audit trail event -area: Audit -type: enhancement -issues: [] diff --git a/docs/changelog/88292.yaml b/docs/changelog/88292.yaml deleted file mode 100644 index 383aa01adce0a..0000000000000 --- a/docs/changelog/88292.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 88292 -summary: Autoscaling during shrink -area: Autoscaling -type: bug -issues: - - 85480 diff --git a/docs/changelog/88295.yaml b/docs/changelog/88295.yaml deleted file mode 100644 index 93bea72f1da07..0000000000000 --- a/docs/changelog/88295.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88295 -summary: Enforce max values limit only when running a script -area: Mapping -type: bug -issues: [] diff --git a/docs/changelog/88297.yaml b/docs/changelog/88297.yaml deleted file mode 100644 index 5a759223df69b..0000000000000 --- a/docs/changelog/88297.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88297 -summary: Print full exception when console is non-interactive -area: Infra/Core -type: enhancement -issues: [] diff --git a/docs/changelog/88305.yaml b/docs/changelog/88305.yaml deleted file mode 100644 index 1ffae032aeb53..0000000000000 --- a/docs/changelog/88305.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88305 -summary: Support "dry run" mode for updating Desired Nodes -area: Distributed 
-type: enhancement -issues: [] diff --git a/docs/changelog/88329.yaml b/docs/changelog/88329.yaml deleted file mode 100644 index b105f533fc6ba..0000000000000 --- a/docs/changelog/88329.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88329 -summary: File Settings Service -area: Infra/Core -type: feature -issues: [] diff --git a/docs/changelog/88333.yaml b/docs/changelog/88333.yaml deleted file mode 100644 index f72dbe19ff1b7..0000000000000 --- a/docs/changelog/88333.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88333 -summary: "Script: Metadata for update context" -area: Infra/Scripting -type: enhancement -issues: [] diff --git a/docs/changelog/88335.yaml b/docs/changelog/88335.yaml deleted file mode 100644 index 9f8c4f5688e5f..0000000000000 --- a/docs/changelog/88335.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88335 -summary: Support `run_as` another user when granting API keys -area: Security -type: enhancement -issues: [] diff --git a/docs/changelog/88336.yaml b/docs/changelog/88336.yaml deleted file mode 100644 index a968b6b7fc28a..0000000000000 --- a/docs/changelog/88336.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 88336 -summary: Add `build_flavor` back to info api rest response -area: Infra/Core -type: bug -issues: - - 88318 diff --git a/docs/changelog/88344.yaml b/docs/changelog/88344.yaml deleted file mode 100644 index 8a052beaeddfb..0000000000000 --- a/docs/changelog/88344.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88344 -summary: Speed up creating new `IndexMetaDataGenerations` without removed snapshots -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/88346.yaml b/docs/changelog/88346.yaml deleted file mode 100644 index ca2537f28a5a9..0000000000000 --- a/docs/changelog/88346.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88346 -summary: Updatable API keys - noop check -area: Security -type: enhancement -issues: [] diff --git a/docs/changelog/88347.yaml b/docs/changelog/88347.yaml deleted file mode 100644 index 33f19cdd079cb..0000000000000 --- a/docs/changelog/88347.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88347 -summary: Fix NLP `question_answering` task when best answer is only one token -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/88358.yaml b/docs/changelog/88358.yaml deleted file mode 100644 index a3dd22bbe0a19..0000000000000 --- a/docs/changelog/88358.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88385 -summary: Fixed NullPointerException on bulk request -area: Distributed -type: bug -issues: [] diff --git a/docs/changelog/88378.yaml b/docs/changelog/88378.yaml deleted file mode 100644 index 132e27468ee1d..0000000000000 --- a/docs/changelog/88378.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88378 -summary: Indicate overall deployment failure if all node routes are failed -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/88385.yaml b/docs/changelog/88385.yaml deleted file mode 100644 index f3715950263d8..0000000000000 --- a/docs/changelog/88385.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 88385 -summary: Prevent re-balancing using outdated node weights in some cases -area: Allocation -type: bug -issues: - - 88384 diff --git a/docs/changelog/88397.yaml b/docs/changelog/88397.yaml deleted file mode 100644 index 47a7cbc101655..0000000000000 --- a/docs/changelog/88397.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88397 -summary: Polling cluster formation state for master-is-stable health indicator -area: Health -type: enhancement -issues: [] diff --git a/docs/changelog/88398.yaml b/docs/changelog/88398.yaml deleted file mode 100644 index 
e694b560e9407..0000000000000 --- a/docs/changelog/88398.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88398 -summary: Track the count of failed invocations since last successful policy snapshot -area: ILM+SLM -type: enhancement -issues: [] diff --git a/docs/changelog/88399.yaml b/docs/changelog/88399.yaml deleted file mode 100644 index f38fc092ae629..0000000000000 --- a/docs/changelog/88399.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 88399 -summary: Improve error when sorting on incompatible types -area: Search -type: enhancement -issues: - - 73146 diff --git a/docs/changelog/88413.yaml b/docs/changelog/88413.yaml deleted file mode 100644 index 5b2a172b1ba6e..0000000000000 --- a/docs/changelog/88413.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88413 -summary: Ensure `CreateApiKey` always creates a new document -area: Security -type: bug -issues: [] diff --git a/docs/changelog/88445.yaml b/docs/changelog/88445.yaml deleted file mode 100644 index d0edbd4eb9345..0000000000000 --- a/docs/changelog/88445.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88445 -summary: Add issuer to GET _ssl/certificates -area: TLS -type: enhancement -issues: [] diff --git a/docs/changelog/88450.yaml b/docs/changelog/88450.yaml deleted file mode 100644 index cf23825d2a45e..0000000000000 --- a/docs/changelog/88450.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88450 -summary: Add new `cache_size` parameter to `trained_model` deployments API -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/88456.yaml b/docs/changelog/88456.yaml deleted file mode 100644 index bb3a5d1182365..0000000000000 --- a/docs/changelog/88456.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88456 -summary: Audit API key ID when create or grant API keys -area: Audit -type: enhancement -issues: [] diff --git a/docs/changelog/88462.yaml b/docs/changelog/88462.yaml deleted file mode 100644 index f391ee08822f9..0000000000000 --- a/docs/changelog/88462.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88462 -summary: Update Tableau connector to use connection dialog v2 -area: SQL -type: enhancement -issues: [] diff --git a/docs/changelog/88470.yaml b/docs/changelog/88470.yaml deleted file mode 100644 index 1063bb5440478..0000000000000 --- a/docs/changelog/88470.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 88470 -summary: Fix queued snapshot assignments after partial snapshot fails due to delete -area: Snapshot/Restore -type: bug -issues: - - 86724 diff --git a/docs/changelog/88479.yaml b/docs/changelog/88479.yaml deleted file mode 100644 index 5febaf0ab1232..0000000000000 --- a/docs/changelog/88479.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88479 -summary: Deduplicate mappings in persisted cluster state -area: Cluster Coordination -type: enhancement -issues: [] diff --git a/docs/changelog/88487.yaml b/docs/changelog/88487.yaml deleted file mode 100644 index aacacfdd7a613..0000000000000 --- a/docs/changelog/88487.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88487 -summary: Support cartesian shape with doc values -area: Geo -type: enhancement -issues: [] diff --git a/docs/changelog/88523.yaml b/docs/changelog/88523.yaml deleted file mode 100644 index 0c41e4160f117..0000000000000 --- a/docs/changelog/88523.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88523 -summary: Add health user action for unhealthy SLM policy failure counts -area: Health -type: enhancement -issues: [] diff --git a/docs/changelog/88551.yaml b/docs/changelog/88551.yaml deleted file mode 100644 index 16cac9193eaa6..0000000000000 --- a/docs/changelog/88551.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 88551 -summary: "Fix: 
use status code 500 for aggregation reduce phase errors if no shard failed" -area: Search -type: bug -issues: - - 20004 diff --git a/docs/changelog/88553.yaml b/docs/changelog/88553.yaml deleted file mode 100644 index f9c3e278c7323..0000000000000 --- a/docs/changelog/88553.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 88553 -summary: "Remove help_url,rename summary to symptom, and `user_actions` to diagnosis" -area: Health -type: feature -issues: - - 88474 diff --git a/docs/changelog/88560.yaml b/docs/changelog/88560.yaml deleted file mode 100644 index deea1f5e71f70..0000000000000 --- a/docs/changelog/88560.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88560 -summary: Always close directory streams -area: Infra/Core -type: bug -issues: [] diff --git a/docs/changelog/88584.yaml b/docs/changelog/88584.yaml deleted file mode 100644 index 77eb177633c11..0000000000000 --- a/docs/changelog/88584.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88584 -summary: Fix docker positional params -area: Packaging -type: bug -issues: [] diff --git a/docs/changelog/88586.yaml b/docs/changelog/88586.yaml deleted file mode 100644 index f3c60f5c6705f..0000000000000 --- a/docs/changelog/88586.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 88586 -summary: Disable URL connection caching in SPIClassIterator -area: Infra/Plugins -type: bug -issues: - - 88275 diff --git a/docs/changelog/88603.yaml b/docs/changelog/88603.yaml deleted file mode 100644 index b369852823a2f..0000000000000 --- a/docs/changelog/88603.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88603 -summary: Enable synthetic source support on constant keyword fields -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/88619.yaml b/docs/changelog/88619.yaml deleted file mode 100644 index 7a31e2748d19a..0000000000000 --- a/docs/changelog/88619.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88619 -summary: Handle update error correctly -area: Transform -type: bug -issues: [] diff --git a/docs/changelog/88622.yaml b/docs/changelog/88622.yaml deleted file mode 100644 index 62c4080a69b01..0000000000000 --- a/docs/changelog/88622.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 88622 -summary: Use origin for the client when running _features/_reset -area: Infra/Core -type: bug -issues: - - 88617 diff --git a/docs/changelog/88626.yaml b/docs/changelog/88626.yaml deleted file mode 100644 index 406f8ac8199b8..0000000000000 --- a/docs/changelog/88626.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88626 -summary: Adding the ability to register a `PeerFinderListener` to Coordinator -area: Distributed -type: enhancement -issues: [] diff --git a/docs/changelog/88638.yaml b/docs/changelog/88638.yaml deleted file mode 100644 index 1765fa300bf11..0000000000000 --- a/docs/changelog/88638.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88638 -summary: Fix multi-value handling in composite agg -area: Aggregations -type: bug -issues: [] diff --git a/docs/changelog/88641.yaml b/docs/changelog/88641.yaml deleted file mode 100644 index 1d7a784cd41ce..0000000000000 --- a/docs/changelog/88641.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88641 -summary: Replace health request with a state observer -area: Allocation -type: bug -issues: [] diff --git a/docs/changelog/88642.yaml b/docs/changelog/88642.yaml deleted file mode 100644 index 967ed0ea16d6c..0000000000000 --- a/docs/changelog/88642.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88642 -summary: Include API key metadata in audit log when an API key is created, granted, or updated -area: Audit -type: enhancement -issues: [] diff --git a/docs/changelog/88655.yaml 
b/docs/changelog/88655.yaml deleted file mode 100644 index eb4c35c01c6be..0000000000000 --- a/docs/changelog/88655.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88655 -summary: Make `bucket_correlation` aggregation generally available -area: Machine Learning -type: feature -issues: [] diff --git a/docs/changelog/88657.yaml b/docs/changelog/88657.yaml deleted file mode 100644 index 4b27646da2b4f..0000000000000 --- a/docs/changelog/88657.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88657 -summary: Make `bucket_count_ks_test` aggregation generally available -area: Machine Learning -type: feature -issues: [] diff --git a/docs/changelog/88675.yaml b/docs/changelog/88675.yaml deleted file mode 100644 index 137e418269f56..0000000000000 --- a/docs/changelog/88675.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 88675 -summary: Upgrade to OpenJDK 18.0.2+9 -area: Packaging -type: upgrade -issues: - - 88673 diff --git a/docs/changelog/88694.yaml b/docs/changelog/88694.yaml deleted file mode 100644 index 6e7bbc6cafed9..0000000000000 --- a/docs/changelog/88694.yaml +++ /dev/null @@ -1,14 +0,0 @@ -pr: 88694 -summary: Integrate ANN into `_search` endpoint -area: Vector Search -type: feature -issues: - - 87625 -highlight: - title: Integrate ANN into `_search` endpoint - body: |- - This change adds a `knn` option to the `_search` API to support ANN - search. It's powered by the same Lucene ANN capabilities as the old - `_knn_search` endpoint. The `knn` option can be combined with other - search features like queries and aggregations. - notable: true diff --git a/docs/changelog/88702.yaml b/docs/changelog/88702.yaml deleted file mode 100644 index 50baa7e6c3c5a..0000000000000 --- a/docs/changelog/88702.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88702 -summary: Fix validation of `close_pit` request -area: Search -type: bug -issues: [] diff --git a/docs/changelog/88703.yaml b/docs/changelog/88703.yaml deleted file mode 100644 index 778eb60be785b..0000000000000 --- a/docs/changelog/88703.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88703 -summary: Update indices permissions to Enterprise Search service account -area: Authorization -type: enhancement -issues: [] diff --git a/docs/changelog/88707.yaml b/docs/changelog/88707.yaml deleted file mode 100644 index b40fc0d878471..0000000000000 --- a/docs/changelog/88707.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88707 -summary: Avoid capturing `SnapshotsInProgress$Entry` in queue -area: Snapshot/Restore -type: bug -issues: [] diff --git a/docs/changelog/88719.yaml b/docs/changelog/88719.yaml deleted file mode 100644 index 374f253ab694b..0000000000000 --- a/docs/changelog/88719.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88719 -summary: Convert disk watermarks to RelativeByteSizeValues -area: Infra/Settings -type: enhancement -issues: [] diff --git a/docs/changelog/88725.yaml b/docs/changelog/88725.yaml deleted file mode 100644 index c85677b1259b0..0000000000000 --- a/docs/changelog/88725.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 88725 -summary: Increase `http.max_header_size` default to 16kb -area: Network -type: enhancement -issues: - - 88501 diff --git a/docs/changelog/88740.yaml b/docs/changelog/88740.yaml deleted file mode 100644 index 1d796a7190bed..0000000000000 --- a/docs/changelog/88740.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88740 -summary: "Script: `UpdateByQuery` can read doc version if requested" -area: "Infra/Scripting" -type: bug -issues: [] diff --git a/docs/changelog/88785.yaml b/docs/changelog/88785.yaml deleted file mode 100644 index 00b890f8aa99b..0000000000000 --- 
a/docs/changelog/88785.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 88785 -summary: Support kNN vectors in disk usage action -area: Search -type: enhancement -issues: - - 84801 diff --git a/docs/changelog/88807.yaml b/docs/changelog/88807.yaml deleted file mode 100644 index 553f69bb291dd..0000000000000 --- a/docs/changelog/88807.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88807 -summary: Add inference cache hit count to inference node stats -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/88811.yaml b/docs/changelog/88811.yaml deleted file mode 100644 index 33f9148a4e7d8..0000000000000 --- a/docs/changelog/88811.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88811 -summary: Fix NPE when checking if the last snapshot was success -area: Health -type: bug -issues: [] diff --git a/docs/changelog/88825.yaml b/docs/changelog/88825.yaml deleted file mode 100644 index 4af0d242fa62c..0000000000000 --- a/docs/changelog/88825.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88825 -summary: fix minor tokenization bug when using fill_mask task with roberta tokenizer -area: Machine Learning -type: bug -issues: [] diff --git a/docs/painless/painless-guide/painless-runtime-fields.asciidoc b/docs/painless/painless-guide/painless-runtime-fields.asciidoc index 11df87af0ad3f..5252a89923889 100644 --- a/docs/painless/painless-guide/painless-runtime-fields.asciidoc +++ b/docs/painless/painless-guide/painless-runtime-fields.asciidoc @@ -6,9 +6,9 @@ filtering, and sorting. When defining a runtime field, you can include a Painless script that is evaluated at query time. This script has access to the entire context of a -document, including the original `_source` and any mapped fields plus their -values. At query time, the script runs and generates values for each scripted -field that is included in the query. +document, including the original document {ref}/modules-scripting-fields.html[`_source` field] +and any mapped fields plus their values. At query time, the script runs and +generates values for each scripted field that is included in the query. You can map a runtime field in the `runtime` section under the mapping definition, or define runtime fields that exist only as part of a search diff --git a/docs/plugins/analysis-kuromoji.asciidoc b/docs/plugins/analysis-kuromoji.asciidoc index 75bd6cc446d0d..9759b7fdd21f5 100644 --- a/docs/plugins/analysis-kuromoji.asciidoc +++ b/docs/plugins/analysis-kuromoji.asciidoc @@ -10,12 +10,12 @@ include::install_remove.asciidoc[] [[analysis-kuromoji-analyzer]] ==== `kuromoji` analyzer -The `kuromoji` analyzer consists of the following tokenizer and token filters: +The `kuromoji` analyzer uses the following analysis chain: +* `CJKWidthCharFilter` from Lucene * <> * <> token filter * <> token filter -* {ref}/analysis-cjk-width-tokenfilter.html[`cjk_width`] token filter * <> token filter * <> token filter * {ref}/analysis-lowercase-tokenfilter.html[`lowercase`] token filter diff --git a/docs/plugins/authors.asciidoc b/docs/plugins/authors.asciidoc index 8ffa6a0f5fe5a..7c1f285cb9e4b 100644 --- a/docs/plugins/authors.asciidoc +++ b/docs/plugins/authors.asciidoc @@ -3,12 +3,12 @@ :plugin-properties-files: {elasticsearch-root}/build-tools/src/main/resources -The Elasticsearch repository contains https://github.com/elastic/elasticsearch/tree/master/plugins/examples[examples of plugins]. Some of these include: +The Elasticsearch repository contains {es-repo}tree/main/plugins/examples[examples of plugins]. 
Some of these include: -* a plugin with https://github.com/elastic/elasticsearch/tree/master/plugins/examples/custom-settings[custom settings] -* adding https://github.com/elastic/elasticsearch/tree/master/plugins/examples/rest-handler[custom rest endpoints] -* adding a https://github.com/elastic/elasticsearch/tree/master/plugins/examples/rescore[custom rescorer] -* a script https://github.com/elastic/elasticsearch/tree/master/plugins/examples/script-expert-scoring[implemented in Java] +* a plugin with {es-repo}tree/main/plugins/examples/custom-settings[custom settings] +* adding {es-repo}tree/main/plugins/examples/rest-handler[custom rest endpoints] +* adding a {es-repo}tree/main/plugins/examples/rescore[custom rescorer] +* a script {es-repo}tree/main/plugins/examples/script-expert-scoring[implemented in Java] These examples provide the bare bones needed to get started. For more information about how to write a plugin, we recommend looking at the plugins diff --git a/docs/plugins/mapper-size.asciidoc b/docs/plugins/mapper-size.asciidoc index 50b2586f6f000..1929daa89bf92 100644 --- a/docs/plugins/mapper-size.asciidoc +++ b/docs/plugins/mapper-size.asciidoc @@ -83,3 +83,12 @@ GET my-index-000001/_search {ref}/search-fields.html#script-fields[script field] to return the `_size` field in the search response. +[NOTE] +.Using `_size` in {kib} +================================================ + +To use the `_size` field in {kib}, update the `metaFields` setting and add +`_size` to the list of meta fields. `metaFields` can be configured in {kib} +from the Advanced Settings page in Management. + +================================================ \ No newline at end of file diff --git a/docs/plugins/plugin-script.asciidoc b/docs/plugins/plugin-script.asciidoc index 6b3eb89a3958e..f04c181152306 100644 --- a/docs/plugins/plugin-script.asciidoc +++ b/docs/plugins/plugin-script.asciidoc @@ -109,7 +109,7 @@ to a local Java truststore and pass the location to the script as follows: + [source,shell] ----------------------------------- -sudo ES_JAVA_OPTS="-Djavax.net.ssl.trustStore=/path/to/trustStore.jks" bin/elasticsearch-plugin install https://host/plugin.zip +sudo CLI_JAVA_OPTS="-Djavax.net.ssl.trustStore=/path/to/trustStore.jks" bin/elasticsearch-plugin install https://host/plugin.zip ----------------------------------- -- @@ -261,19 +261,19 @@ sudo ES_PATH_CONF=/path/to/conf/dir bin/elasticsearch-plugin install > to run your +requests asynchronously. + ==== Syntax @@ -55,7 +59,8 @@ A `frequent_items` aggregation looks like this in isolation: ==== Fields Supported field types for the analyzed fields are keyword, numeric, ip, date, -and arrays of these types. You can also add runtime fields to your analyzed fields. +and arrays of these types. You can also add runtime fields to your analyzed +fields. If the combined cardinality of the analyzed fields are high, then the aggregation might require a significant amount of system resources. @@ -113,9 +118,12 @@ and (2.) from which cities they make those purchases. We are interested in sets with three or more items, and want to see the first three frequent item sets with the highest support. +Note that we use the <> endpoint in this first +example. 
+ [source,console] ------------------------------------------------- -GET kibana_sample_data_ecommerce /_search +POST /kibana_sample_data_ecommerce /_async_search { "size": 0, "aggs": { @@ -123,7 +131,7 @@ GET kibana_sample_data_ecommerce /_search "frequent_items": { "minimum_set_size": 3, "fields": [ - { "field": "category.keyword" }, + { "field": "category.keyword" }, { "field": "geoip.city_name" } ], "size": 3 @@ -134,6 +142,15 @@ GET kibana_sample_data_ecommerce /_search ------------------------------------------------- // TEST[skip:setup kibana sample data] +The response of the API call above contains an identifier (`id`) of the async +search request. You can use the identifier to retrieve the search results: + +[source,console] +------------------------------------------------- +GET /_async_search/ +------------------------------------------------- +// TEST[skip:setup kibana sample data] + The API returns a response similar to the following one: [source,console-result] @@ -141,9 +158,9 @@ The API returns a response similar to the following one: (...) "aggregations" : { "my_agg" : { - "buckets" : [ + "buckets" : [ <1> { - "key" : { + "key" : { <2> "category.keyword" : [ "Women's Clothing", "Women's Shoes" @@ -152,8 +169,8 @@ The API returns a response similar to the following one: "New York" ] }, - "doc_count" : 217, - "support" : 0.04641711229946524 + "doc_count" : 217, <3> + "support" : 0.04641711229946524 <4> }, { "key" : { @@ -188,6 +205,13 @@ The API returns a response similar to the following one: ------------------------------------------------- // TEST[skip:setup kibana sample data] +<1> The array of returned item sets. +<2> The `key` object contains one item set. In this case, it consists of two +values of the `category.keyword` field and one value of the `geoip.city_name`. +<3> The number of documents that contain the item set. +<4> The support value of the item set. It is calculated by dividing the number +of documents containing the item set by the total number of documents. + The response shows that the categories customers purchase from most frequently together are `Women's Clothing` and `Women's Shoes` and customers from New York tend to buy items from these categories frequently togeher. In other words, diff --git a/docs/reference/aggregations/pipeline/bucket-correlation-aggregation.asciidoc b/docs/reference/aggregations/pipeline/bucket-correlation-aggregation.asciidoc index ae6ceb2f16c94..841632124805f 100644 --- a/docs/reference/aggregations/pipeline/bucket-correlation-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/bucket-correlation-aggregation.asciidoc @@ -5,6 +5,8 @@ Bucket correlation ++++ +experimental::[] + A sibling pipeline aggregation which executes a correlation function on the configured sibling multi-bucket aggregation. diff --git a/docs/reference/aggregations/pipeline/bucket-count-ks-test-aggregation.asciidoc b/docs/reference/aggregations/pipeline/bucket-count-ks-test-aggregation.asciidoc index 50185e1aec56c..631ee48923f1e 100644 --- a/docs/reference/aggregations/pipeline/bucket-count-ks-test-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/bucket-count-ks-test-aggregation.asciidoc @@ -5,6 +5,8 @@ Bucket count K-S test ++++ +experimental::[] + A sibling pipeline aggregation which executes a two sample Kolmogorov–Smirnov test (referred to as a "K-S test" from now on) against a provided distribution, and the distribution implied by the documents counts in the configured sibling aggregation. 
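The frequent item sets walkthrough above submits the aggregation through the async search endpoint and then retrieves the result with the returned `id`. As a rough illustration of that submit-then-retrieve flow (not an official client; the host, index handling and crude JSON parsing are placeholder assumptions), a plain Java 11 `HttpClient` version might look like this:

[source,java]
----
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

/** Sketch of the async-search flow used by the frequent_items example above. */
public class AsyncSearchSketch {

    private static final String ES = "http://localhost:9200"; // assumed local test cluster

    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();

        // Abbreviated version of the aggregation body from the docs example above.
        String body = """
            {
              "size": 0,
              "aggs": {
                "my_agg": {
                  "frequent_items": {
                    "minimum_set_size": 3,
                    "fields": [
                      { "field": "category.keyword" },
                      { "field": "geoip.city_name" }
                    ],
                    "size": 3
                  }
                }
              }
            }""";

        // 1) Submit the search asynchronously; the response carries an "id" for later retrieval.
        HttpRequest submit = HttpRequest.newBuilder(URI.create(ES + "/kibana_sample_data_ecommerce/_async_search"))
            .header("Content-Type", "application/json")
            .POST(HttpRequest.BodyPublishers.ofString(body))
            .build();
        String submitResponse = client.send(submit, HttpResponse.BodyHandlers.ofString()).body();

        // Crude id extraction to keep the sketch dependency-free; a real client would parse the JSON.
        String id = extract(submitResponse, "\"id\":\"");
        System.out.println("async search id: " + id);

        // 2) Retrieve the results by id, mirroring GET /_async_search/<id> from the docs above.
        HttpRequest fetch = HttpRequest.newBuilder(URI.create(ES + "/_async_search/" + id)).GET().build();
        System.out.println(client.send(fetch, HttpResponse.BodyHandlers.ofString()).body());

        // Per the callouts above, support = doc_count / total document count; the sample
        // response's doc_count of 217 and support of ~0.0464 imply roughly 4675 documents.
    }

    private static String extract(String json, String prefix) {
        int start = json.indexOf(prefix);
        if (start < 0) {
            throw new IllegalStateException("no id in response (the search may have completed synchronously)");
        }
        start += prefix.length();
        return json.substring(start, json.indexOf('"', start));
    }
}
----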
diff --git a/docs/reference/cat/snapshots.asciidoc b/docs/reference/cat/snapshots.asciidoc index 6a2bbf040d493..6c400e235cb2d 100644 --- a/docs/reference/cat/snapshots.asciidoc +++ b/docs/reference/cat/snapshots.asciidoc @@ -103,7 +103,7 @@ units>>. `total_shards`, `ts`:: (Default) Total number of shards in the snapshot. -`reason, `r`:: +`reason`, `r`:: Reason for any snapshot failures. -- diff --git a/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc index ed377e72fce49..e6509e024de84 100644 --- a/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc @@ -58,7 +58,10 @@ This API creates a new named collection of <> against the remote cluster specified in the request body. Newly created indices on the remote cluster matching any of the specified patterns will be automatically configured as follower -indices. Additionally, this API can be used to update existing +indices. Indices on the remote cluster that were created before the auto-follow +pattern is created won't be auto-followed even if they match the pattern. + +This API can also be used to update existing <>. Note that follower indices that were configured automatically before updating an auto-follow pattern will remain unchanged even if they don't match against the new patterns. @@ -85,11 +88,14 @@ the new patterns. more `leader_index_patterns` and one or more `leader_index_exclusion_patterns` won't be followed. `follow_index_pattern`:: - (Optional, string) The name of follower index. The template `{{leader_index}}` - can be used to derive the name of the follower index from the name of the - leader index. When following a data stream, use `{{leader_index}}`; {ccr-init} - does not support changes to the names of a follower data stream's backing - indices. + (Optional, string) The name of follower index. The template `{{leader_index}}` can be used to + derive the name of the follower index from the name of the leader index. When following a data + stream, the `follow_index_pattern` will be used for renaming not only the leader index, but also + the data stream containing the leader index. For example, a data stream called + `logs-mysql-default` with a backing index of `.ds-logs-mysql-default-2022-01-01-000001` and a + `follow_index_pattern` of `{{leader_index}}_copy` will replicate the data stream as + `logs-mysql-default_copy` and the backing index as + `.ds-logs-mysql-default_copy-2022-01-01-000001`. include::../follow-request-body.asciidoc[] diff --git a/docs/reference/ccr/apis/follow/put-follow.asciidoc b/docs/reference/ccr/apis/follow/put-follow.asciidoc index d09eb51534042..93e8a710751a8 100644 --- a/docs/reference/ccr/apis/follow/put-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/put-follow.asciidoc @@ -76,6 +76,26 @@ referenced leader index. When this API returns, the follower index exists, and (Required, string) The <> containing the leader index. +[[ccr-put-follow-request-body-data_stream_name]]`data_stream_name`:: + (Optional, string) If the leader index is part of a <>, the name to + which the local data stream for the followed index should be renamed. 
For example, a request like: + +[source,console] -------------------------------------------------- PUT /.ds-logs-mysql-default_copy-2022-01-01-000001/_ccr/follow { "remote_cluster" : "remote_cluster", "leader_index" : ".ds-logs-mysql-default-2022-01-01-000001", "data_stream_name": "logs-mysql-default_copy" } -------------------------------------------------- // TEST[skip:no setup] + +Replicates the leader index `.ds-logs-mysql-default-2022-01-01-000001` into the follower index +`.ds-logs-mysql-default_copy-2022-01-01-000001` and will do so using the data stream +`logs-mysql-default_copy`, as opposed to the original leader data stream name of +`logs-mysql-default`. + include::../follow-request-body.asciidoc[] [[ccr-put-follow-examples]] diff --git a/docs/reference/ccr/getting-started.asciidoc b/docs/reference/ccr/getting-started.asciidoc index 7b8a938132800..779dfc301baa8 100644 --- a/docs/reference/ccr/getting-started.asciidoc +++ b/docs/reference/ccr/getting-started.asciidoc @@ -99,7 +99,7 @@ example, `cluster.es.eastus2.staging.azure.foundit.no:9400` or [%collapsible%open] .API example ==== -You can also use the <> to +You can also use the <> to add a remote cluster: [source,console] @@ -159,14 +159,6 @@ cluster with cluster alias `leader`. connected to. ==== -[[ccr-enable-soft-deletes]] -==== Enable soft deletes on leader indices -To follow an index, it must have been created with -<> enabled. If the index doesn’t have -soft deletes enabled, you must reindex it and use the new index as the leader -index. Soft deletes are enabled by default on new indices -created with {es} 7.0.0 and later. - include::../../../x-pack/docs/en/security/authentication/remote-clusters-privileges.asciidoc[tag=configure-ccr-privileges] [[ccr-getting-started-follower-index]] @@ -200,8 +192,8 @@ image::images/ccr-follower-index.png["The Cross-Cluster Replication page in {kib [%collapsible%open] .API example ==== -You can also use the <> to create follower -indices. When you create a follower index, you must reference the remote cluster +You can also use the <> to create follower +indices. When you create a follower index, you must reference the remote cluster and the leader index that you created in the remote cluster. When initiating the follower request, the response returns before the @@ -255,7 +247,9 @@ POST /server-metrics-follower/_ccr/unfollow You use <> to automatically create new followers for rolling time series indices. Whenever the name of a new index on the remote cluster matches the auto-follow pattern, a corresponding follower -index is added to the local cluster. +index is added to the local cluster. Note that only indices created on the +remote cluster after the auto-follow pattern is created will be auto-followed: +existing indices on the remote cluster are ignored even if they match the pattern.
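The `follow_index_pattern` and `data_stream_name` behaviour described earlier in this section boils down to name templating: `{{leader_index}}` is replaced with the leader name, and for data streams the renamed data stream name also appears inside the follower's backing index name. A small Java sketch of the resulting names, using only the `logs-mysql-default` example from the documentation (illustrative only; the real renaming is performed by the cross-cluster replication code itself):

[source,java]
----
/** Name templating illustrated with the values from the CCR documentation example above. */
public class FollowPatternExample {

    /** Apply a follow_index_pattern such as "{{leader_index}}_copy" to a leader name. */
    static String applyPattern(String pattern, String leaderName) {
        return pattern.replace("{{leader_index}}", leaderName);
    }

    public static void main(String[] args) {
        // Data stream case from the docs: the data stream name receives the suffix,
        // and the renamed name also shows up in the follower's backing index name.
        String leaderDataStream = "logs-mysql-default";
        String leaderBackingIndex = ".ds-logs-mysql-default-2022-01-01-000001";

        String followerDataStream = applyPattern("{{leader_index}}_copy", leaderDataStream);
        String followerBackingIndex = leaderBackingIndex.replace(leaderDataStream, followerDataStream);

        System.out.println(followerDataStream);   // logs-mysql-default_copy
        System.out.println(followerBackingIndex); // .ds-logs-mysql-default_copy-2022-01-01-000001
    }
}
----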
An auto-follow pattern specifies the remote cluster you want to replicate from, and one or more index patterns that specify the rolling time series indices you diff --git a/docs/reference/docs/bulk.asciidoc b/docs/reference/docs/bulk.asciidoc index 0928541024b2e..bdbb40bb37a3b 100644 --- a/docs/reference/docs/bulk.asciidoc +++ b/docs/reference/docs/bulk.asciidoc @@ -140,7 +140,7 @@ JavaScript:: See {jsclient-current}/client-helpers.html[client.helpers.*] .NET:: - See https://www.elastic.co/guide/en/elasticsearch/client/net-api/current/indexing-documents.html#bulkall-observable[`BulkAllObservable`] + See https://www.elastic.co/guide/en/elasticsearch/client/net-api/current/indexing-documents.html[`BulkAllObservable`] PHP:: See https://www.elastic.co/guide/en/elasticsearch/client/php-api/current/indexing_documents.html#_bulk_indexing[Bulk indexing] diff --git a/docs/reference/high-availability/cluster-design.asciidoc b/docs/reference/high-availability/cluster-design.asciidoc index 919e026bbeea2..bddc8c206a7af 100644 --- a/docs/reference/high-availability/cluster-design.asciidoc +++ b/docs/reference/high-availability/cluster-design.asciidoc @@ -338,12 +338,16 @@ You should use <> to ensure that there is a copy of each shard in each zone. This means either zone remains fully available if the other zone fails. -All master-eligible nodes, including voting-only nodes, are on the critical path -for publishing cluster state updates. Because of this, these nodes require -reasonably fast persistent storage and a reliable, low-latency network -connection to the rest of the cluster. If you add a tiebreaker node in a third -independent zone then you must make sure it has adequate resources and good -connectivity to the rest of the cluster. +All master-eligible nodes, including voting-only nodes, are on the critical +path for <>. Cluster +state updates are usually independent of performance-critical workloads such as +indexing or searches, but they are involved in management activities such as +index creation and rollover, mapping updates, and recovery after a failure. The +performance characteristics of these activities are a function of the speed of +the storage on each master-eligible node, as well as the reliability and +latency of the network interconnections between all nodes in the cluster. You +must therefore ensure that the storage and networking available to the +nodes in your cluster are good enough to meet your performance goals. [[high-availability-cluster-design-three-zones]] ==== Clusters with three or more zones diff --git a/docs/reference/how-to/disk-usage.asciidoc b/docs/reference/how-to/disk-usage.asciidoc index d9dcf5832741d..6eef9621ed9de 100644 --- a/docs/reference/how-to/disk-usage.asciidoc +++ b/docs/reference/how-to/disk-usage.asciidoc @@ -136,6 +136,6 @@ if fields always occur in the same order. [[roll-up-historical-data]] === Roll up historical data -Keeping older data can useful for later analysis but is often avoided due to +Keeping older data can be useful for later analysis but is often avoided due to storage costs. You can use data rollups to summarize and store historical data at a fraction of the raw data's storage cost. See <>. diff --git a/docs/reference/how-to/size-your-shards.asciidoc b/docs/reference/how-to/size-your-shards.asciidoc index c06986d405f9b..d06891278336b 100644 --- a/docs/reference/how-to/size-your-shards.asciidoc +++ b/docs/reference/how-to/size-your-shards.asciidoc @@ -55,14 +55,14 @@ thread pool>>. 
This can result in low throughput and slow search speeds. [discrete] [[each-shard-has-overhead]] -==== Each index, shard and field has overhead +==== Each index, shard, segment and field has overhead Every index and every shard requires some memory and CPU resources. In most cases, a small set of large shards uses fewer resources than many small shards. Segments play a big role in a shard's resource usage. Most shards contain -several segments, which store its index data. {es} keeps segment metadata in -JVM heap memory so it can be quickly retrieved for searches. As a shard grows, +several segments, which store its index data. {es} keeps some segment metadata +in heap memory so it can be quickly retrieved for searches. As a shard grows, its segments are <> into fewer, larger segments. This decreases the number of segments, which means less metadata is kept in heap memory. @@ -72,6 +72,13 @@ space. By default {es} will automatically create a mapping for every field in every document it indexes, but you can switch off this behaviour to <>. +Moreover every segment requires a small amount of heap memory for each mapped +field. This per-segment-per-field heap overhead includes a copy of the field +name, encoded using ISO-8859-1 if applicable or UTF-16 otherwise. Usually this +is not noticeable, but you may need to account for this overhead if your shards +have high segment counts and the corresponding mappings contain high field +counts and/or very long field names. + [discrete] [[shard-auto-balance]] ==== {es} automatically balances shards within a data tier @@ -175,17 +182,25 @@ index prirep shard store [discrete] [[shard-count-recommendation]] -==== Aim for 3000 indices or fewer per GB of heap memory on each master node +==== Master-eligible nodes should have at least 1GB of heap per 3000 indices The number of indices a master node can manage is proportional to its heap size. The exact amount of heap memory needed for each index depends on various factors such as the size of the mapping and the number of shards per index. -As a general rule of thumb, you should aim for 3000 indices or fewer per GB of -heap on master nodes. For example, if your cluster contains 12000 indices then -each dedicated master node should have at least 4GB of heap. For non-dedicated -master nodes, the same rule holds and should be added to the heap requirements -of the other roles of each node. +As a general rule of thumb, you should have fewer than 3000 indices per GB of +heap on master nodes. For example, if your cluster has dedicated master nodes +with 4GB of heap each then you should have fewer than 12000 indices. If your +master nodes are not dedicated master nodes then the same sizing guidance +applies: you should reserve at least 1GB of heap on each master-eligible node +for every 3000 indices in your cluster. + +Note that this rule defines the absolute maximum number of indices that a +master node can manage, but does not guarantee the performance of searches or +indexing involving this many indices. You must also ensure that your data nodes +have adequate resources for your workload and that your overall sharding +strategy meets all your performance requirements. See also +<> and <>. To check the configured size of each node's heap, use the <>. 
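One concrete way to read off each node's configured heap, offered here as a convenience rather than the only option, is the cat nodes API with a `heap.max` column:

[source,console]
--------------------------------------------------
GET _cat/nodes?v=true&h=name,node.role,heap.max
--------------------------------------------------

Master-eligible nodes are the rows whose `node.role` value contains `m`; their `heap.max` should be at least the cluster's index count divided by 3000, expressed in GB.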
@@ -207,7 +222,7 @@ GET _cat/shards?v=true [discrete] [[field-count-recommendation]] -==== Allow 1kB of heap per field per index on data nodes, plus overheads +==== Data nodes should have at least 1kB of heap per field per index, plus overheads The exact resource usage of each mapped field depends on its type, but a rule of thumb is to allow for approximately 1kB of heap overhead per mapped field @@ -222,6 +237,13 @@ For example, if a data node holds shards from 1000 indices, each containing of heap for the fields and another 0.5GB of heap for its workload and other overheads, and therefore this node will need a heap size of at least 4.5GB. +Note that this rule defines the absolute maximum number of indices that a data +node can manage, but does not guarantee the performance of searches or indexing +involving this many indices. You must also ensure that your data nodes have +adequate resources for your workload and that your overall sharding strategy +meets all your performance requirements. See also <> +and <>. + [discrete] [[avoid-node-hotspots]] ==== Avoid node hotspots diff --git a/docs/reference/ilm/actions/ilm-rollover.asciidoc b/docs/reference/ilm/actions/ilm-rollover.asciidoc index 980ec54591038..823ba92f29778 100644 --- a/docs/reference/ilm/actions/ilm-rollover.asciidoc +++ b/docs/reference/ilm/actions/ilm-rollover.asciidoc @@ -4,7 +4,8 @@ Phases allowed: hot. -Rolls over a target to a new index when the existing index meets one or more of the rollover conditions. +Rolls over a target to a new index when the existing index satisfies +the specified rollover conditions. IMPORTANT: If the rollover action is used on a <>, policy execution waits until the leader index rolls over (or is @@ -45,8 +46,11 @@ PUT my-index-000001 [[ilm-rollover-options]] ==== Options -You must specify at least one rollover condition. -An empty rollover action is invalid. +A rollover action must specify at least one max_* condition, it may include zero +or more min_* conditions. An empty rollover action is invalid. + +The index will rollover once any max_* condition is satisfied and all +min_* conditions are satisfied. // tag::rollover-conditions[] `max_age`:: @@ -90,6 +94,32 @@ replicas are ignored. + TIP: To see the current shard docs, use the <> API. The `docs` value shows the number of documents each shard. + +`min_age`:: +(Optional, <>) +Prevents rollover until after the minimum elapsed time from index creation is reached. +See notes on `max_age`. + +`min_docs`:: +(Optional, integer) +Prevents rollover until after the specified minimum number of documents is reached. +See notes on `max_docs`. + +`min_size`:: +(Optional, <>) +Prevents rollover until the index reaches a certain size. +See notes on `max_size`. + +`min_primary_shard_size`:: +(Optional, <>) +Prevents rollover until the largest primary shard in the index reaches a certain size. +See notes on `max_primary_shard_size`. + +`min_primary_shard_docs`:: +(Optional, integer) +Prevents rollover until the largest primary shard in the index reaches a certain number of documents. +See notes on `max_primary_shard_docs`. 
+ // end::rollover-conditions[] [[ilm-rollover-ex]] @@ -109,7 +139,7 @@ PUT _ilm/policy/my_policy "hot": { "actions": { "rollover" : { - "max_primary_shard_size": "50GB" + "max_primary_shard_size": "50gb" } } } @@ -132,7 +162,7 @@ PUT _ilm/policy/my_policy "hot": { "actions": { "rollover" : { - "max_size": "100GB" + "max_size": "100gb" } } } @@ -214,8 +244,9 @@ PUT _ilm/policy/my_policy ===== Roll over using multiple conditions When you specify multiple rollover conditions, -the index is rolled over when _any_ of the conditions are met. -This example rolls the index over if it is at least 7 days old or at least 100 gigabytes. +the index is rolled over when _any_ of the max_* and _all_ of the min_* conditions are met. +This example rolls the index over if it is at least 7 days old or at least 100 gigabytes, +but only as long as the index is not empty. [source,console] -------------------------------------------------- @@ -227,7 +258,35 @@ PUT _ilm/policy/my_policy "actions": { "rollover" : { "max_age": "7d", - "max_size": "100GB" + "max_size": "100gb", + "min_docs": 1 + } + } + } + } + } +} +-------------------------------------------------- + +[ilm-rollover-conditions-ex]] +===== Roll over while maintaining shard sizes + +This example rolls the index over when the primary shard size is at least 50gb, +or when the index is at least 30 days old, but only as long as a primary shard is at least 1gb. +For low-volume indices, this prevents the creation of many small shards. + +[source,console] +-------------------------------------------------- +PUT _ilm/policy/my_policy +{ + "policy": { + "phases": { + "hot": { + "actions": { + "rollover" : { + "max_primary_shard_size": "50gb", + "max_age": "30d", + "min_primary_shard_size": "1gb" } } } @@ -254,7 +313,7 @@ PUT /_ilm/policy/rollover_policy "hot": { "actions": { "rollover": { - "max_size": "50GB" + "max_size": "50gb" } } }, diff --git a/docs/reference/ilm/apis/get-lifecycle.asciidoc b/docs/reference/ilm/apis/get-lifecycle.asciidoc index f736de20ed437..0ff1496a21f35 100644 --- a/docs/reference/ilm/apis/get-lifecycle.asciidoc +++ b/docs/reference/ilm/apis/get-lifecycle.asciidoc @@ -115,6 +115,7 @@ If the request succeeds, the body of the response contains the policy definition } } -------------------------------------------------- +// TEST[skip:https://github.com/elastic/elasticsearch/issues/89623] // TESTRESPONSE[s/"modified_date": 82392349/"modified_date": $body.my_policy.modified_date/] <1> The policy version is incremented whenever the policy is updated diff --git a/docs/reference/ilm/index-rollover.asciidoc b/docs/reference/ilm/index-rollover.asciidoc index 9c69ad968041c..3755619a6f15a 100644 --- a/docs/reference/ilm/index-rollover.asciidoc +++ b/docs/reference/ilm/index-rollover.asciidoc @@ -43,7 +43,7 @@ On each rollover, the new index becomes the write index. === Automatic rollover {ilm-init} enables you to automatically roll over to a new index based -on the index size, document count, or age. When a rollover is triggered, a new +on conditions like the index size, document count, or age. When a rollover is triggered, a new index is created, the write alias is updated to point to the new index, and all subsequent updates are written to the new index. 
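For readers wiring this up end to end, the rewritten pages above define the rollover conditions, but rollover only happens automatically once a policy is attached to newly created indices and, in the classic alias-based flow, pointed at a write alias (data streams need no alias). The sketch below assumes a hypothetical `timeseries` alias, template, and bootstrap index, and reuses the `my_policy` name from the examples above:

[source,console]
--------------------------------------------------
PUT _index_template/timeseries_template
{
  "index_patterns": ["timeseries-*"],
  "template": {
    "settings": {
      "index.lifecycle.name": "my_policy",
      "index.lifecycle.rollover_alias": "timeseries"
    }
  }
}

PUT timeseries-000001
{
  "aliases": {
    "timeseries": {
      "is_write_index": true
    }
  }
}
--------------------------------------------------
// TEST[skip:illustrative example]

Once `timeseries-000001` satisfies any `max_*` condition (and all `min_*` conditions) in `my_policy`, {ilm-init} creates `timeseries-000002` and moves the write alias to it.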
diff --git a/docs/reference/indices/rollover-index.asciidoc b/docs/reference/indices/rollover-index.asciidoc index d8d67c29a540e..3869f35b560fa 100644 --- a/docs/reference/indices/rollover-index.asciidoc +++ b/docs/reference/indices/rollover-index.asciidoc @@ -111,7 +111,7 @@ include::{es-repo-dir}/indices/create-index.asciidoc[tag=index-name-reqs] `dry_run`:: (Optional, Boolean) -If `true`, checks whether the current index matches one or more specified +If `true`, checks whether the current index satisfies the specified `conditions` but does not perform a rollover. Defaults to `false`. include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=wait_for_active_shards] @@ -132,10 +132,14 @@ include::{es-repo-dir}/indices/create-index.asciidoc[tag=aliases-props] `conditions`:: (Optional, object) Conditions for the rollover. If specified, {es} only performs the rollover if -the current index meets one or more of these conditions. If this parameter is +the current index satisfies these conditions. If this parameter is not specified, {es} performs the rollover unconditionally. + -IMPORTANT: To trigger a rollover, the current index must meet these conditions +If conditions are specified, at least one of them must be a max_* condition. +The index will rollover if any max_* condition is satisfied and all +min_* conditions are satisfied. ++ +IMPORTANT: To trigger a rollover, the current index must satisfy these conditions at the time of the request. {es} does not monitor the index after the API response. To automate rollover, use {ilm-init}'s <> instead. @@ -197,7 +201,7 @@ conditions were specified, this is an empty object. ==== ``:: (Boolean) The key is each condition. The value is its result. If `true`, the -index met the condition at rollover. +index met the condition. ==== [[rollover-index-api-example]] diff --git a/docs/reference/ingest/common-log-format-example.asciidoc b/docs/reference/ingest/common-log-format-example.asciidoc index fed77aac94dd0..9ee5a73ceb70d 100644 --- a/docs/reference/ingest/common-log-format-example.asciidoc +++ b/docs/reference/ingest/common-log-format-example.asciidoc @@ -13,9 +13,7 @@ The logs you want to parse look similar to this: [source,log] ---- -212.87.37.154 - - [30/May/2099:16:21:15 +0000] \"GET /favicon.ico HTTP/1.1\" -200 3638 \"-\" \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) -AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36\" +212.87.37.154 - - [05/May/2099:16:21:15 +0000] "GET /favicon.ico HTTP/1.1" 200 3638 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36" ---- // NOTCONSOLE @@ -30,7 +28,8 @@ Pipelines**. image::images/ingest/ingest-pipeline-list.png[Kibana's Ingest Pipelines list view,align="center"] . Click **Create pipeline > New pipeline**. -. Provide a name and description for the pipeline. +. Set **Name** to `my-pipeline` and optionally add a description for the +pipeline. . Add a <> to parse the log message: .. Click **Add a processor** and select the **Grok** processor type. 
@@ -39,7 +38,7 @@ image::images/ingest/ingest-pipeline-list.png[Kibana's Ingest Pipelines list vie + [source,grok] ---- -%{IPORHOST:source.ip} %{USER:user.id} %{USER:user.name} \\[%{HTTPDATE:@timestamp}\\] \"%{WORD:http.request.method} %{DATA:url.original} HTTP/%{NUMBER:http.version}\" %{NUMBER:http.response.status_code:int} (?:-|%{NUMBER:http.response.body.bytes:int}) %{QS:http.request.referrer} %{QS:user_agent} +%{IPORHOST:source.ip} %{USER:user.id} %{USER:user.name} \[%{HTTPDATE:@timestamp}\] "%{WORD:http.request.method} %{DATA:url.original} HTTP/%{NUMBER:http.version}" %{NUMBER:http.response.status_code:int} (?:-|%{NUMBER:http.response.body.bytes:int}) %{QS:http.request.referrer} %{QS:user_agent} ---- // NOTCONSOLE + diff --git a/docs/reference/mapping/fields/source-field.asciidoc b/docs/reference/mapping/fields/source-field.asciidoc index 76d98303dce82..f905be3d452ba 100644 --- a/docs/reference/mapping/fields/source-field.asciidoc +++ b/docs/reference/mapping/fields/source-field.asciidoc @@ -6,7 +6,6 @@ at index time. The `_source` field itself is not indexed (and thus is not searchable), but it is stored so that it can be returned when executing _fetch_ requests, like <> or <>. -ifeval::["{release-state}"=="unreleased"] If disk usage is important to you then have a look at <> which shrinks disk usage at the cost of only supporting a subset of mappings and slower fetches or (not recommended) @@ -14,8 +13,6 @@ only supporting a subset of mappings and slower fetches or (not recommended) usage but disables many features. include::synthetic-source.asciidoc[] -endif::[] - [[disable-source-field]] ==== Disabling the `_source` field diff --git a/docs/reference/mapping/fields/synthetic-source.asciidoc b/docs/reference/mapping/fields/synthetic-source.asciidoc index cb1534426d1c7..82c15a4b5e67b 100644 --- a/docs/reference/mapping/fields/synthetic-source.asciidoc +++ b/docs/reference/mapping/fields/synthetic-source.asciidoc @@ -1,5 +1,5 @@ [[synthetic-source]] -==== Synthetic `_source` +==== Synthetic `_source` preview:[] Though very handy to have around, the source field takes up a significant amount of space on disk. Instead of storing source documents on disk exactly as you diff --git a/docs/reference/mapping/params/copy-to.asciidoc b/docs/reference/mapping/params/copy-to.asciidoc index 8e6cb036e3326..10eebfb027736 100644 --- a/docs/reference/mapping/params/copy-to.asciidoc +++ b/docs/reference/mapping/params/copy-to.asciidoc @@ -67,6 +67,13 @@ Some important points: * You cannot copy recursively via intermediary fields such as a `copy_to` on `field_1` to `field_2` and `copy_to` on `field_2` to `field_3` expecting indexing into `field_1` will eventuate in `field_3`, instead use copy_to -directly to multiple fields from the originating field. +directly to multiple fields from the originating field. +* If the target field does not exist in the index mappings, the usual +<> behavior applies. By default, with +<> set to `true`, a non-existent target field will be +dynamically added to the index mappings. If `dynamic` is set to `false`, the +target field will not be added to the index mappings, and the value will not be +copied. If `dynamic` is set to `strict`, copying to a non-existent field will +result in an error. -NOTE: `copy-to` is _not_ supported for field types where values take the form of objects, e.g. `date_range` \ No newline at end of file +NOTE: `copy_to` is _not_ supported for field types where values take the form of objects, e.g. 
`date_range` \ No newline at end of file diff --git a/docs/reference/mapping/runtime.asciidoc b/docs/reference/mapping/runtime.asciidoc index f354e110dc213..264de46c6e632 100644 --- a/docs/reference/mapping/runtime.asciidoc +++ b/docs/reference/mapping/runtime.asciidoc @@ -58,7 +58,7 @@ the `fields` parameter on the `_search` API to runs only against the top hits just like script fields do. You can use <> to access values in `_source` and -return calculated values based on a script valuation. Runtime fields have these +return calculated values based on a script valuation. Runtime fields have the same capabilities, but provide greater flexibility because you can query and aggregate on runtime fields in a search request. Script fields can only fetch values. diff --git a/docs/reference/mapping/types.asciidoc b/docs/reference/mapping/types.asciidoc index c3116d56175ba..7108d536f8715 100644 --- a/docs/reference/mapping/types.asciidoc +++ b/docs/reference/mapping/types.asciidoc @@ -137,6 +137,8 @@ include::types/binary.asciidoc[] include::types/boolean.asciidoc[] +include::types/completion.asciidoc[] + include::types/date.asciidoc[] include::types/date_nanos.asciidoc[] diff --git a/docs/reference/mapping/types/boolean.asciidoc b/docs/reference/mapping/types/boolean.asciidoc index 52fefddd0fe68..ed6e2648dee4a 100644 --- a/docs/reference/mapping/types/boolean.asciidoc +++ b/docs/reference/mapping/types/boolean.asciidoc @@ -215,9 +215,8 @@ The following parameters are accepted by `boolean` fields: Metadata about the field. -ifeval::["{release-state}"=="unreleased"] [[boolean-synthetic-source]] -==== Synthetic source +==== Synthetic source preview:[] `boolean` fields support <> in their default configuration. Synthetic `_source` cannot be used together with <> or with <> disabled. @@ -249,4 +248,3 @@ Will become: } ---- // TEST[s/^/{"_source":/ s/\n$/}/] -endif::[] diff --git a/docs/reference/mapping/types/completion.asciidoc b/docs/reference/mapping/types/completion.asciidoc new file mode 100644 index 0000000000000..d8b1ce98b5292 --- /dev/null +++ b/docs/reference/mapping/types/completion.asciidoc @@ -0,0 +1,61 @@ +[[completion]] +=== Completion field type +++++ +Completion +++++ +// tag::completion-mapping[] +To use the <>, map the field from +which you want to generate suggestions as type `completion`. This indexes the +field values for fast completions. + +[source,console] +-------------------------------------------------- +PUT music +{ + "mappings": { + "properties": { + "suggest": { + "type": "completion" + } + } + } +} +-------------------------------------------------- + +==== Parameters for `completion` fields + +The following parameters are accepted by `completion` fields: + +[horizontal] +<>:: + + The index analyzer to use, defaults to `simple`. + +<>:: + + The search analyzer to use, defaults to value of `analyzer`. + +`preserve_separators`:: + + Preserves the separators, defaults to `true`. + If disabled, you could find a field starting with `Foo Fighters`, if you + suggest for `foof`. + +`preserve_position_increments`:: + + Enables position increments, defaults to `true`. + If disabled and using stopwords analyzer, you could get a + field starting with `The Beatles`, if you suggest for `b`. *Note*: You + could also achieve this by indexing two inputs, `Beatles` and + `The Beatles`, no need to change a simple analyzer, if you are able to + enrich your data. + +`max_input_length`:: + + Limits the length of a single input, defaults to `50` UTF-16 code points. 
+ This limit is only used at index time to reduce the total number of + characters per input string in order to prevent massive inputs from + bloating the underlying datastructure. Most use cases won't be influenced + by the default value since prefix completions seldom grow beyond prefixes longer + than a handful of characters. +// end::completion-mapping[] \ No newline at end of file diff --git a/docs/reference/mapping/types/geo-point.asciidoc b/docs/reference/mapping/types/geo-point.asciidoc index 0b866861e7365..fad74ba733cc2 100644 --- a/docs/reference/mapping/types/geo-point.asciidoc +++ b/docs/reference/mapping/types/geo-point.asciidoc @@ -204,9 +204,8 @@ def lat = doc['location'].lat; def lon = doc['location'].lon; -------------------------------------------------- -ifeval::["{release-state}"=="unreleased"] [[geo-point-synthetic-source]] -==== Synthetic source +==== Synthetic source preview:[] `geo_point` fields support <> in their default configuration. Synthetic `_source` cannot be used together with <>, <>, or with @@ -246,4 +245,3 @@ Will become: } ---- // TEST[s/^/{"_source":/ s/\n$/}/] -endif::[] diff --git a/docs/reference/mapping/types/ip.asciidoc b/docs/reference/mapping/types/ip.asciidoc index 141a133184927..b35a5486906b7 100644 --- a/docs/reference/mapping/types/ip.asciidoc +++ b/docs/reference/mapping/types/ip.asciidoc @@ -151,9 +151,8 @@ GET my-index-000001/_search } -------------------------------------------------- -ifeval::["{release-state}"=="unreleased"] [[ip-synthetic-source]] -==== Synthetic source +==== Synthetic source preview:[] `ip` fields support <> in their default configuration. Synthetic `_source` cannot be used together with <>, <>, or with @@ -192,5 +191,3 @@ Will become: NOTE: IPv4 addresses are sorted as though they were IPv6 addresses prefixed by `::ffff:0:0:0/96` as specified by https://datatracker.ietf.org/doc/html/rfc6144[rfc6144]. - -endif::[] diff --git a/docs/reference/mapping/types/keyword.asciidoc b/docs/reference/mapping/types/keyword.asciidoc index de6080e8c1679..3e985c05e696f 100644 --- a/docs/reference/mapping/types/keyword.asciidoc +++ b/docs/reference/mapping/types/keyword.asciidoc @@ -174,9 +174,8 @@ Dimension fields have the following constraints: * The field cannot use a <>. -- -ifeval::["{release-state}"=="unreleased"] [[keyword-synthetic-source]] -==== Synthetic source +==== Synthetic source preview:[] `keyword` fields support <> in their default configuration. Synthetic `_source` cannot be used together with <>, a <>, @@ -212,8 +211,6 @@ Will become: ---- // TEST[s/^/{"_source":/ s/\n$/}/] -endif::[] - include::constant-keyword.asciidoc[] include::wildcard.asciidoc[] diff --git a/docs/reference/mapping/types/numeric.asciidoc b/docs/reference/mapping/types/numeric.asciidoc index ee347664319c4..0228430caf45d 100644 --- a/docs/reference/mapping/types/numeric.asciidoc +++ b/docs/reference/mapping/types/numeric.asciidoc @@ -229,15 +229,14 @@ endif::[] of `scaling_factor` improve accuracy but also increase space requirements. This parameter is required. -ifeval::["{release-state}"=="unreleased"] [[numeric-synthetic-source]] -==== Synthetic source +==== Synthetic source preview:[] All numeric fields except `unsigned_long` support <> in their default configuration. Synthetic `_source` cannot be used together with <>, <>, or with <> disabled. -Synthetic source always sorts numeric fields and removes duplicates. For example: +Synthetic source always sorts numeric fields. 
For example: [source,console,id=synthetic-source-numeric-example] ---- PUT idx @@ -293,5 +292,3 @@ Will become: } ---- // TEST[s/^/{"_source":/ s/\n$/}/] - -endif::[] diff --git a/docs/reference/mapping/types/text.asciidoc b/docs/reference/mapping/types/text.asciidoc index 5ba3d7fbbc46d..d6c51bf81f172 100644 --- a/docs/reference/mapping/types/text.asciidoc +++ b/docs/reference/mapping/types/text.asciidoc @@ -159,9 +159,8 @@ The following parameters are accepted by `text` fields: Metadata about the field. -ifeval::["{release-state}"=="unreleased"] [[text-synthetic-source]] -==== Synthetic source +==== Synthetic source preview:[] `text` fields support <> if they have a `keyword` sub-field that supports synthetic `_source` and *do not* have <>. @@ -214,7 +213,6 @@ NOTE: Reordering text fields can have an effect on <> for more detail. You can avoid this by making sure the `slop` parameter on the phrase queries is lower than the `position_increment_gap`. This is the default. -endif::[] [[fielddata-mapping-param]] ==== `fielddata` mapping parameter diff --git a/docs/reference/migration/migrate_8_4.asciidoc b/docs/reference/migration/migrate_8_4.asciidoc index d0a676c86aa0e..e3e1bee164a9a 100644 --- a/docs/reference/migration/migrate_8_4.asciidoc +++ b/docs/reference/migration/migrate_8_4.asciidoc @@ -9,9 +9,6 @@ your application to {es} 8.4. See also <> and <>. -coming::[8.4.0] - - [discrete] [[breaking-changes-8.4]] === Breaking changes @@ -20,3 +17,33 @@ coming::[8.4.0] There are no breaking changes in {es} 8.4. // end::notable-breaking-changes[] + +[discrete] +[[deprecated-8.4]] +=== Deprecations + +The following functionality has been deprecated in {es} 8.4 +and will be removed in a future version. +While this won't have an immediate impact on your applications, +we strongly encourage you to take the described steps to update your code +after upgrading to 8.4. + +To find out if you are using any deprecated functionality, +enable <>. + + +[discrete] +[[deprecations_84_rest_api]] +==== REST API deprecations + +[[deprecate_knn_search_endpoint]] +.Deprecate the `_knn_search` endpoint +[%collapsible] +==== +*Details* + +-| The kNN search API is deprecated in favor of the new 'knn' option inside the search API. The 'knn' option is now the recommended way of running ANN search. + +*Impact* + +Users should switch from `_knn_search` to the search `knn` option. +==== + diff --git a/docs/reference/ml/anomaly-detection/ml-configuring-aggregations.asciidoc b/docs/reference/ml/anomaly-detection/ml-configuring-aggregations.asciidoc index a7bfcfb57aba1..469f0bdb12b51 100644 --- a/docs/reference/ml/anomaly-detection/ml-configuring-aggregations.asciidoc +++ b/docs/reference/ml/anomaly-detection/ml-configuring-aggregations.asciidoc @@ -2,69 +2,126 @@ [[ml-configuring-aggregation]] = Aggregating data for faster performance -By default, {dfeeds} fetch data from {es} using search and scroll requests. -It can be significantly more efficient, however, to aggregate data in {es} -and to configure your {anomaly-jobs} to analyze aggregated data. +When you aggregate data, {es} automatically distributes the calculations across +your cluster. Then you can feed this aggregated data into the {ml-features} +instead of raw results. It reduces the volume of data that must be analyzed. -One of the benefits of aggregating data this way is that {es} automatically -distributes these calculations across your cluster. 
You can then feed this -aggregated data into the {ml-features} instead of raw results, which -reduces the volume of data that must be considered while detecting anomalies. -TIP: If you use a terms aggregation and the cardinality of a term is high but -still significantly less than your total number of documents, use -{ref}/search-aggregations-bucket-composite-aggregation.html[composite aggregations]. +[discrete] +[[aggs-requs-dfeeds]] +== Requirements + +There are a number of requirements for using aggregations in {dfeeds}. + +[discrete] +[[aggs-aggs]] +=== Aggregations + +* Your aggregation must include a `date_histogram` aggregation or a top level +`composite` aggregation, which in turn must contain a `max` aggregation on the +time field. It ensures that the aggregated data is a time series and the +timestamp of each bucket is the time of the last record in the bucket. + +* The `time_zone` parameter in the date histogram aggregation must be set to +`UTC`, which is the default value. + +* The name of the aggregation and the name of the field that it operates on need +to match. For example, if you use a `max` aggregation on a time field called +`responsetime`, the name of the aggregation must also be `responsetime`. + +* For `composite` aggregation support, there must be exactly one +`date_histogram` value source. That value source must not be sorted in +descending order. Additional `composite` aggregation value sources are allowed, +such as `terms`. + +* If you set the `summary_count_field_name` property to a non-null value, the +{anomaly-job} expects to receive aggregated input. The property must be set to +the name of the field that contains the count of raw data points that have been +aggregated. It applies to all detectors in the job. + +* The influencers or the partition fields must be included in the aggregation of +your {dfeed}, otherwise they are not included in the job analysis. For more +information on influencers, refer to <>. + + +[discrete] +[[aggs-interval]] +=== Intervals + +* The bucket span of your {anomaly-job} must be divisible by the value of the +`calendar_interval` or `fixed_interval` in your aggregation (with no remainder). + +* If you specify a `frequency` for your {dfeed}, it must be divisible by the +`calendar_interval` or the `fixed_interval`. + +* {anomaly-jobs-cap} cannot use `date_histogram` or `composite` aggregations +with an interval measured in months because the length of the month is not +fixed; they can use weeks or smaller units. + [discrete] [[aggs-limits-dfeeds]] -== Requirements and limitations - -There are some limitations to using aggregations in {dfeeds}. - -Your aggregation must include a `date_histogram` aggregation or a top level `composite` aggregation, -which in turn must contain a `max` aggregation on the time field. -This requirement ensures that the aggregated data is a time series and the timestamp -of each bucket is the time of the last record in the bucket. - -IMPORTANT: The name of the aggregation and the name of the field that it -operates on need to match, otherwise the aggregation doesn't work. For example, -if you use a `max` aggregation on a time field called `responsetime`, the name -of the aggregation must be also `responsetime`. - -You must consider the interval of the `date_histogram` or `composite` -aggregation carefully. The bucket span of your {anomaly-job} must be divisible -by the value of the `calendar_interval` or `fixed_interval` in your aggregation -(with no remainder). 
If you specify a `frequency` for your {dfeed}, -it must also be divisible by this interval. {anomaly-jobs-cap} cannot use -`date_histogram` or `composite` aggregations with an interval measured in months -because the length of the month is not fixed; they can use weeks or smaller units. - -TIP: As a rule of thumb, if your detectors use <> or -<> analytical functions, set the `date_histogram` or `composite` -aggregation interval to a tenth of the bucket span. This suggestion creates -finer, more granular time buckets, which are ideal for this type of analysis. If -your detectors use <> or <> -functions, set the interval to the same value as the bucket span. - -If your <> and -model plot is not enabled for the {anomaly-job}, neither the **Single Metric -Viewer** nor the **Anomaly Explorer** can plot and display an anomaly -chart for the job. In these cases, the charts are not visible and an explanatory -message is shown. - -Your {dfeed} can contain multiple aggregations, but only the ones with names +== Limitations + +* If your <> and +model plot is not enabled for the {anomaly-job}, neither the +**Single Metric Viewer** nor the **Anomaly Explorer** can plot and display an +anomaly chart. In these cases, an explanatory message is shown instead of the +chart. + +* Your {dfeed} can contain multiple aggregations, but only the ones with names that match values in the job configuration are fed to the job. + +[discrete] +[[aggs-recommendations-dfeeds]] +== Recommendations + +* When your detectors use <> or +<> analytical functions, it's recommended to set the +`date_histogram` or `composite` aggregation interval to a tenth of the bucket +span. This creates finer, more granular time buckets, which are ideal for this +type of analysis. + +* When your detectors use <> or +<> functions, set the interval to the same value as the +bucket span. + +* If you have multiple influencers or partition fields or if your field +cardinality is more than 1000, use +{ref}/search-aggregations-bucket-composite-aggregation.html[composite aggregations]. ++ +-- +To determine the cardinality of your data, you can run searches such as: + +[source,js] +-------------------------------------------------- +GET .../_search +{ + "aggs": { + "service_cardinality": { + "cardinality": { + "field": "service" + } + } + } +} +-------------------------------------------------- +// NOTCONSOLE +-- + + [discrete] [[aggs-using-date-histogram]] -=== Including aggregations in {anomaly-jobs} +== Including aggregations in {anomaly-jobs} -When you create or update an {anomaly-job}, you can include the names of -aggregations, for example: +When you create or update an {anomaly-job}, you can include aggregated fields in +the analysis configuration. In the {dfeed} configuration object, you can define +the aggregations. 
[source,console] ---------------------------------- -PUT _ml/anomaly_detectors/farequote +PUT _ml/anomaly_detectors/kibana-sample-data-flights { "analysis_config": { "bucket_span": "60m", @@ -73,13 +130,13 @@ PUT _ml/anomaly_detectors/farequote "field_name": "responsetime", <1> "by_field_name": "airline" <1> }], - "summary_count_field_name": "doc_count" + "summary_count_field_name": "doc_count" <2> }, "data_description": { "time_field":"time" <1> }, "datafeed_config":{ - "indices": ["farequote"], + "indices": ["kibana-sample-data-flights"], "aggregations": { "buckets": { "date_histogram": { @@ -88,16 +145,16 @@ PUT _ml/anomaly_detectors/farequote "time_zone": "UTC" }, "aggregations": { - "time": { <2> + "time": { <3> "max": {"field": "time"} }, - "airline": { <3> + "airline": { <4> "terms": { "field": "airline", "size": 100 }, "aggregations": { - "responsetime": { <4> + "responsetime": { <5> "avg": { "field": "responsetime" } @@ -115,46 +172,66 @@ PUT _ml/anomaly_detectors/farequote <1> The `airline`, `responsetime`, and `time` fields are aggregations. Only the aggregated fields defined in the `analysis_config` object are analyzed by the {anomaly-job}. -<2> The aggregations have names that match the fields that they operate on. The +<2> The `summary_count_field_name` property is set to the `doc_count` field that +is an aggregated field and contains the count of the aggregated data points. +<3> The aggregations have names that match the fields that they operate on. The `max` aggregation is named `time` and its field also needs to be `time`. -<3> The `term` aggregation is named `airline` and its field is also named +<4> The `term` aggregation is named `airline` and its field is also named `airline`. -<4> The `avg` aggregation is named `responsetime` and its field is also named +<5> The `avg` aggregation is named `responsetime` and its field is also named `responsetime`. -When the `summary_count_field_name` property is set to a non-null value, the job -expects to receive aggregated input. The property must be set to the name of the -field that contains the count of raw data points that have been aggregated. It -applies to all detectors in the job. +Use the following format to define a `date_histogram` aggregation to bucket by +time in your {dfeed}: + +[source,js] +---------------------------------- +"aggregations": { + ["bucketing_aggregation": { + "bucket_agg": { + ... + }, + "aggregations": { + "data_histogram_aggregation": { + "date_histogram": { + "field": "time", + }, + "aggregations": { + "timestamp": { + "max": { + "field": "time" + } + }, + [,"": { + "terms":{... + } + [,"aggregations" : { + []+ + } ] + }] + } + } + } + } +} +---------------------------------- +// NOTCONSOLE -TIP: If you are using a `term` aggregation to gather influencer or partition -field information, consider using a `composite` aggregation. It performs -better than a `date_histogram` with a nested `term` aggregation and also -includes all the values of the field instead of the top values per bucket. [discrete] [[aggs-using-composite]] -=== Using composite aggregations in {anomaly-jobs} - -For `composite` aggregation support, there must be exactly one `date_histogram` value -source. That value source must not be sorted in descending order. Additional -`composite` aggregation value sources are allowed, such as `terms`. - -NOTE: A {dfeed} that uses composite aggregations may not be as performant as -{dfeeds} that use scrolling or date histogram aggregations. 
Composite -aggregations are optimized for queries that are either `match_all` or `range` -filters. Other types of -queries may cause the `composite` aggregation to be inefficient. +== Composite aggregations -Here is an example that uses a `composite` aggregation instead of a -`date_histogram`. +Composite aggregations are optimized for queries that are either `match_all` or +`range` filters. Use composite aggregations in your {dfeeds} for these cases. +Other types of queries may cause the `composite` aggregation to be inefficient. -This is an example of a job with a {dfeed} that uses a `composite` aggregation -to bucket the metrics based on time and terms: +The following is an example of a job with a {dfeed} that uses a `composite` +aggregation to bucket the metrics based on time and terms: [source,console] ---------------------------------- -PUT _ml/anomaly_detectors/farequote-composite +PUT _ml/anomaly_detectors/kibana-sample-data-flights-composite { "analysis_config": { "bucket_span": "60m", @@ -169,7 +246,7 @@ PUT _ml/anomaly_detectors/farequote-composite "time_field":"time" }, "datafeed_config":{ - "indices": ["farequote"], + "indices": ["kibana-sample-data-flights"], "aggregations": { "buckets": { "composite": { @@ -210,9 +287,8 @@ PUT _ml/anomaly_detectors/farequote-composite } } ---------------------------------- -<1> Provide the `size` to the composite agg to control how many resources -are used when aggregating the data. A larger `size` means a faster {dfeed} but -more cluster resources are used when searching. +<1> The number of resources to use when aggregating the data. A larger `size` +means a faster {dfeed} but more cluster resources are used when searching. <2> The required `date_histogram` composite aggregation source. Make sure it is named differently than your desired time field. <3> Instead of using a regular `term` aggregation, adding a composite @@ -223,16 +299,55 @@ job analysis config. <5> The `avg` aggregation is named `responsetime` and its field is also named `responsetime`. + +Use the following format to define a composite aggregation in your {dfeed}: + +[source,js] +---------------------------------- +"aggregations": { + "composite_agg": { + "sources": [ + { + "date_histogram_agg": { + "field": "time", + ...settings... + } + }, + ...other valid sources... + ], + ...composite agg settings..., + "aggregations": { + "timestamp": { + "max": { + "field": "time" + } + }, + ...other aggregations... + [ + [,"aggregations" : { + []+ + } ] + }] + } + } +} +---------------------------------- +// NOTCONSOLE + + [discrete] [[aggs-dfeeds]] -== Nested aggregations in {dfeeds} +== Nested aggregations -{dfeeds-cap} support complex nested aggregations. This example uses the -`derivative` pipeline aggregation to find the first order derivative of the -counter `system.network.out.bytes` for each value of the field `beat.name`. +You can also use complex nested aggregations in {dfeeds}. -NOTE: `derivative` or other pipeline aggregations may not work within `composite` -aggregations. See +The next example uses the +{ref}/search-aggregations-pipeline-derivative-aggregation.html[`derivative` pipeline aggregation] +to find the first order derivative of the counter `system.network.out.bytes` for +each value of the field `beat.name`. + +NOTE: `derivative` or other pipeline aggregations may not work within +`composite` aggregations. 
See {ref}/search-aggregations-bucket-composite-aggregation.html#search-aggregations-bucket-composite-aggregation-pipeline-aggregations[composite aggregations and pipeline aggregations]. [source,js] @@ -275,11 +390,11 @@ aggregations. See [discrete] [[aggs-single-dfeeds]] -== Single bucket aggregations in {dfeeds} +== Single bucket aggregations -{dfeeds-cap} not only supports multi-bucket aggregations, but also single bucket -aggregations. The following shows two `filter` aggregations, each gathering the -number of unique entries for the `error` field. +You can also use single bucket aggregations in {dfeeds}. The following example +shows two `filter` aggregations, each gathering the number of unique entries for +the `error` field. [source,js] ---------------------------------- @@ -324,135 +439,3 @@ number of unique entries for the `error` field. ---------------------------------- // NOTCONSOLE - -[discrete] -[[aggs-define-dfeeds]] -== Defining aggregations in {dfeeds} - -When you define an aggregation in a {dfeed}, it must have one of the following forms: - -When using a `date_histogram` aggregation to bucket by time: -[source,js] ----------------------------------- -"aggregations": { - ["bucketing_aggregation": { - "bucket_agg": { - ... - }, - "aggregations": { - "data_histogram_aggregation": { - "date_histogram": { - "field": "time", - }, - "aggregations": { - "timestamp": { - "max": { - "field": "time" - } - }, - [,"": { - "terms":{... - } - [,"aggregations" : { - []+ - } ] - }] - } - } - } - } -} ----------------------------------- -// NOTCONSOLE - -When using a `composite` aggregation: - -[source,js] ----------------------------------- -"aggregations": { - "composite_agg": { - "sources": [ - { - "date_histogram_agg": { - "field": "time", - ...settings... - } - }, - ...other valid sources... - ], - ...composite agg settings..., - "aggregations": { - "timestamp": { - "max": { - "field": "time" - } - }, - ...other aggregations... - [ - [,"aggregations" : { - []+ - } ] - }] - } - } -} ----------------------------------- -// NOTCONSOLE - -The top level aggregation must be exclusively one of the following: - -* A {ref}/search-aggregations-bucket.html[bucket aggregation] containing a single -sub-aggregation that is a `date_histogram` -* A top level aggregation that is a `date_histogram` -* A top level aggregation is a `composite` aggregation - -There must be exactly one `date_histogram`, `composite` aggregation. For more information, see -{ref}/search-aggregations-bucket-datehistogram-aggregation.html[Date histogram aggregation] and -{ref}/search-aggregations-bucket-composite-aggregation.html[Composite aggregation]. - -NOTE: The `time_zone` parameter in the date histogram aggregation must be set to -`UTC`, which is the default value. - -Each histogram or composite bucket has a key, which is the bucket start time. -This key cannot be used for aggregations in {dfeeds}, however, because -they need to know the time of the latest record within a bucket. -Otherwise, when you restart a {dfeed}, it continues from the start time of the -histogram or composite bucket and possibly fetches the same data twice. -The max aggregation for the time field is therefore necessary to provide -the time of the latest record within a bucket. - -You can optionally specify a terms aggregation, which creates buckets for -different values of a field. - -IMPORTANT: If you use a terms aggregation, by default it returns buckets for -the top ten terms. 
Thus if the cardinality of the term is greater than 10, not -all terms are analyzed. In this case, consider using `composite` aggregations. - -You can change this behavior by setting the `size` parameter. To -determine the cardinality of your data, you can run searches such as: - -[source,js] --------------------------------------------------- -GET .../_search -{ - "aggs": { - "service_cardinality": { - "cardinality": { - "field": "service" - } - } - } -} --------------------------------------------------- -// NOTCONSOLE - - -By default, {es} limits the maximum number of terms returned to 10000. For high -cardinality fields, the query might not run. It might return errors related to -circuit breaking exceptions that indicate that the data is too large. In such -cases, use `composite` aggregations in your {dfeed}. For more information, see -{ref}/search-aggregations-bucket-terms-aggregation.html[Terms aggregation]. - -You can also optionally specify multiple sub-aggregations. The sub-aggregations -are aggregated for the buckets that were created by their parent aggregation. -For more information, see {ref}/search-aggregations.html[Aggregations]. diff --git a/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc b/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc index 5057efec24907..7afaf88081b20 100644 --- a/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc +++ b/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc @@ -61,7 +61,7 @@ You must select a type of {ml} result. In particular, you can create rules based on bucket, record, or influencer results. [role="screenshot"] -image::images/ml-anomaly-alert-severity.jpg["Selecting result type, severity, and test interval"] +image::images/ml-anomaly-alert-severity.jpg["Selecting result type, severity, and test interval", 500] For each rule, you can configure the `anomaly_score` that triggers the action. 
The `anomaly_score` indicates the significance of a given anomaly compared to diff --git a/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc index 114c379ddcfbc..e765502870459 100644 --- a/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc @@ -699,7 +699,7 @@ The API returns the following result: ---- // TESTRESPONSE[s/1656364565517/$body.$_path/] -// TESTRESPONSE[s/"version": "8.4.0"/"version": $body.version/] +// TESTRESPONSE[s/"version" : "8.4.0"/"version": $body.version/] // TESTRESPONSE[s/"authorization" : \{[^}]*\},//] @@ -772,7 +772,7 @@ The API returns the following result: ---- // TESTRESPONSE[s/1656364845151/$body.$_path/] -// TESTRESPONSE[s/"version": "8.4.0"/"version": $body.version/] +// TESTRESPONSE[s/"version" : "8.4.0"/"version": $body.version/] // TESTRESPONSE[s/"authorization" : \{[^}]*\},//] // TESTRESPONSE[s/-3578554885299300212/$body.$_path/] diff --git a/docs/reference/ml/images/ml-anomaly-alert-severity.jpg b/docs/reference/ml/images/ml-anomaly-alert-severity.jpg index 0ad03eae066d6..dc6582ebbd84f 100644 Binary files a/docs/reference/ml/images/ml-anomaly-alert-severity.jpg and b/docs/reference/ml/images/ml-anomaly-alert-severity.jpg differ diff --git a/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc b/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc index ae4865dd9f08f..aefabbddf295c 100644 --- a/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc +++ b/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc @@ -71,7 +71,7 @@ Defaults to 1. Controls how many inference requests are allowed in the queue at a time. Every machine learning node in the cluster where the model can be allocated has a queue of this size; when the number of requests exceeds the total value, -new requests are rejected with a 429 error. Defaults to 1024. +new requests are rejected with a 429 error. Defaults to 1024. Max allowed value is 1000000. `threads_per_allocation`:: (Optional, integer) diff --git a/docs/reference/modules/discovery/publishing.asciidoc b/docs/reference/modules/discovery/publishing.asciidoc index 208386946d3fb..af664585085c2 100644 --- a/docs/reference/modules/discovery/publishing.asciidoc +++ b/docs/reference/modules/discovery/publishing.asciidoc @@ -1,38 +1,40 @@ [[cluster-state-publishing]] === Publishing the cluster state -The master node is the only node in a cluster that can make changes to the -cluster state. The master node processes one batch of cluster state updates at -a time, computing the required changes and publishing the updated cluster state -to all the other nodes in the cluster. Each publication starts with the master -broadcasting the updated cluster state to all nodes in the cluster. Each node -responds with an acknowledgement but does not yet apply the newly-received -state. Once the master has collected acknowledgements from enough -master-eligible nodes, the new cluster state is said to be _committed_ and the -master broadcasts another message instructing nodes to apply the now-committed -state. Each node receives this message, applies the updated state, and then -sends a second acknowledgement back to the master. +The elected master node is the only node in a cluster that can make changes to +the cluster state. 
The elected master node processes one batch of cluster state +updates at a time, computing the required changes and publishing the updated +cluster state to all the other nodes in the cluster. Each publication starts +with the elected master broadcasting the updated cluster state to all nodes in +the cluster. Each node responds with an acknowledgement but does not yet apply +the newly-received state. Once the elected master has collected +acknowledgements from enough master-eligible nodes, the new cluster state is +said to be _committed_ and the master broadcasts another message instructing +nodes to apply the now-committed state. Each node receives this message, +applies the updated state, and then sends a second acknowledgement back to the +master. -The master allows a limited amount of time for each cluster state update to be -completely published to all nodes. It is defined by the +The elected master allows a limited amount of time for each cluster state +update to be completely published to all nodes. It is defined by the `cluster.publish.timeout` setting, which defaults to `30s`, measured from the time the publication started. If this time is reached before the new cluster -state is committed then the cluster state change is rejected and the master -considers itself to have failed. It stands down and starts trying to elect a -new master. +state is committed then the cluster state change is rejected and the elected +master considers itself to have failed. It stands down and starts trying to +elect a new master node. If the new cluster state is committed before `cluster.publish.timeout` has -elapsed, the master node considers the change to have succeeded. It waits until -the timeout has elapsed or until it has received acknowledgements that each -node in the cluster has applied the updated state, and then starts processing -and publishing the next cluster state update. If some acknowledgements have not -been received (i.e. some nodes have not yet confirmed that they have applied -the current update), these nodes are said to be _lagging_ since their cluster -states have fallen behind the master's latest state. The master waits for the -lagging nodes to catch up for a further time, `cluster.follower_lag.timeout`, -which defaults to `90s`. If a node has still not successfully applied the -cluster state update within this time then it is considered to have failed and -is removed from the cluster. +elapsed, the elected master node considers the change to have succeeded. It +waits until the timeout has elapsed or until it has received acknowledgements +that each node in the cluster has applied the updated state, and then starts +processing and publishing the next cluster state update. If some +acknowledgements have not been received (i.e. some nodes have not yet confirmed +that they have applied the current update), these nodes are said to be +_lagging_ since their cluster states have fallen behind the elected master's +latest state. The elected master waits for the lagging nodes to catch up for a +further time, `cluster.follower_lag.timeout`, which defaults to `90s`. If a +node has still not successfully applied the cluster state update within this +time then it is considered to have failed and the elected master removes it +from the cluster. Cluster state updates are typically published as diffs to the previous cluster state, which reduces the time and network bandwidth needed to publish a cluster @@ -40,12 +42,19 @@ state update. 
For example, when updating the mappings for only a subset of the indices in the cluster state, only the updates for those indices need to be published to the nodes in the cluster, as long as those nodes have the previous cluster state. If a node is missing the previous cluster state, for example -when rejoining a cluster, the master will publish the full cluster state to -that node so that it can receive future updates as diffs. +when rejoining a cluster, the elected master will publish the full cluster +state to that node so that it can receive future updates as diffs. NOTE: {es} is a peer to peer based system, in which nodes communicate with one another directly. The high-throughput APIs (index, delete, search) do not -normally interact with the master node. The responsibility of the master node -is to maintain the global cluster state and reassign shards when nodes join or -leave the cluster. Each time the cluster state is changed, the new state is -published to all nodes in the cluster as described above. +normally interact with the elected master node. The responsibility of the +elected master node is to maintain the global cluster state which includes +reassigning shards when nodes join or leave the cluster. Each time the cluster +state is changed, the new state is published to all nodes in the cluster as +described above. + +The performance characteristics of cluster state updates are a function of the +speed of the storage on each master-eligible node, as well as the reliability +and latency of the network interconnections between all nodes in the cluster. +You must therefore ensure that the storage and networking available to the +nodes in your cluster are good enough to meet your performance goals. diff --git a/docs/reference/modules/http.asciidoc b/docs/reference/modules/http.asciidoc index 9c4925fb1298d..67818f91f20c8 100644 --- a/docs/reference/modules/http.asciidoc +++ b/docs/reference/modules/http.asciidoc @@ -6,7 +6,7 @@ independently of the <>. You can also configure both interfaces together using the <>. `http.host`:: -(<>) +(<>, string) Sets the address of this node for HTTP traffic. The node will bind to this address and will also use it as its HTTP publish address. Accepts an IP address, a hostname, or a <>. @@ -16,7 +16,7 @@ transport and HTTP interfaces. Defaults to the address given by `network.host`. `http.bind_host`:: -(<>) +(<>, string) The network address(es) to which the node should bind in order to listen for incoming HTTP connections. Accepts a list of IP addresses, hostnames, and <>. Defaults to the address given by @@ -26,7 +26,7 @@ binding, and you also require different binding configurations for the transport and HTTP interfaces. `http.publish_host`:: -(<>) +(<>, string) The network address for HTTP clients to contact the node using sniffing. Accepts an IP address, a hostname, or a <>. Defaults to the address given by `http.host` or @@ -36,27 +36,27 @@ and you also require different binding configurations for the transport and HTTP interfaces. `http.publish_port`:: -(<>) +(<>, integer) The port of the <>. Configure this setting only if you need the publish port to be different from `http.port`. Defaults to the port assigned via `http.port`. `http.max_content_length`:: -(<>) +(<>, <>) Maximum size of an HTTP request body. Defaults to `100mb`. `http.max_initial_line_length`:: -(<>) +(<>, <>) Maximum size of an HTTP URL. Defaults to `4kb`. `http.max_header_size`:: -(<>) +(<>, <>) Maximum size of allowed headers. Defaults to `16kb`. 
[[http-compression]] // tag::http-compression-tag[] `http.compression` {ess-icon}:: -(<>) +(<>, boolean) Support for compression when possible (with Accept-Encoding). If HTTPS is enabled, defaults to `false`. Otherwise, defaults to `true`. + Disabling compression for HTTPS mitigates potential security risks, such as a @@ -65,13 +65,13 @@ you must explicitly set `http.compression` to `true`. // end::http-compression-tag[] `http.compression_level`:: -(<>) +(<>, integer) Defines the compression level to use for HTTP responses. Valid values are in the range of 1 (minimum compression) and 9 (maximum compression). Defaults to `3`. [[http-cors-enabled]] // tag::http-cors-enabled-tag[] `http.cors.enabled` {ess-icon}:: -(<>) +(<>, boolean) Enable or disable cross-origin resource sharing, which determines whether a browser on another origin can execute requests against {es}. Set to `true` to enable {es} to process pre-flight {wikipedia}/Cross-origin_resource_sharing[CORS] requests. {es} will respond to those requests with the `Access-Control-Allow-Origin` header if the `Origin` sent in the request is permitted by the `http.cors.allow-origin` list. Set to `false` (the default) to make {es} ignore the `Origin` request header, effectively disabling CORS requests because {es} will never respond with the `Access-Control-Allow-Origin` response header. @@ -85,7 +85,7 @@ compromised. If CORS is not enabled on {es}, the only way for the client to know [[http-cors-allow-origin]] // tag::http-cors-allow-origin-tag[] `http.cors.allow-origin` {ess-icon}:: -(<>) +(<>, string) Which origins to allow. If you prepend and append a forward slash (`/`) to the value, this will be treated as a regular expression, allowing you to support HTTP and HTTPs. For example, using `/https?:\/\/localhost(:[0-9]+)?/` would return the request header appropriately in both cases. Defaults to no origins allowed. + IMPORTANT: A wildcard (`*`) is a valid value but is considered a security risk, as your {es} instance is open to cross origin requests from *anywhere*. @@ -95,28 +95,30 @@ IMPORTANT: A wildcard (`*`) is a valid value but is considered a security risk, [[http-cors-max-age]] // tag::http-cors-max-age-tag[] `http.cors.max-age` {ess-icon}:: -(<>) -Browsers send a "preflight" OPTIONS-request to determine CORS settings. `max-age` defines how long the result should be cached for. Defaults to `1728000` (20 days). +(<>, integer) +Browsers send a "preflight" OPTIONS-request to determine CORS settings. +`max-age` defines for how long, in seconds, the result should be cached. +Defaults to `1728000` (20 days). // end::http-cors-max-age-tag[] [[http-cors-allow-methods]] // tag::http-cors-allow-methods-tag[] `http.cors.allow-methods` {ess-icon}:: -(<>) +(<>, string) Which methods to allow. Defaults to `OPTIONS, HEAD, GET, POST, PUT, DELETE`. // end::http-cors-allow-methods-tag[] [[http-cors-allow-headers]] // tag::http-cors-allow-headers-tag[] `http.cors.allow-headers` {ess-icon}:: -(<>) +(<>, string) Which headers to allow. Defaults to `X-Requested-With, Content-Type, Content-Length`. // end::http-cors-allow-headers-tag[] [[http-cors-allow-credentials]] // tag::http-cors-allow-credentials-tag[] `http.cors.allow-credentials` {ess-icon}:: -(<>) +(<>, boolean) Whether the `Access-Control-Allow-Credentials` header should be returned. Defaults to `false`. + NOTE: This header is only returned when the setting is set to `true`. @@ -124,80 +126,86 @@ NOTE: This header is only returned when the setting is set to `true`. 
// end::http-cors-allow-credentials-tag[] `http.detailed_errors.enabled`:: -(<>) -If `true`, enables the output of detailed error messages and stack traces in the response output. Defaults to `true`. -+ -If `false`, use the `error_trace` parameter to <> and return detailed error messages. Otherwise, only a simple message will be returned. +(<>, boolean) +Configures whether detailed error reporting in HTTP responses is enabled. +Defaults to `true`, which means that HTTP requests that include the +<> will return a +detailed error message including a stack trace if they encounter an exception. +If set to `false`, requests with the `?error_trace` parameter are rejected. `http.pipelining.max_events`:: -(<>) +(<>, integer) The maximum number of events to be queued up in memory before an HTTP connection is closed, defaults to `10000`. `http.max_warning_header_count`:: -(<>) -The maximum number of warning headers in client HTTP responses. Defaults to `unbounded`. +(<>, integer) +The maximum number of warning headers in client HTTP responses. Defaults to +`-1` which means the number of warning headers is unlimited. `http.max_warning_header_size`:: -(<>) -The maximum total size of warning headers in client HTTP responses. Defaults to `unbounded`. - -`http.tcp.no_delay`:: -(<>) -Enable or disable the {wikipedia}/Nagle%27s_algorithm[TCP no delay] -setting. Defaults to `network.tcp.no_delay`. +(<>, <>) +The maximum total size of warning headers in client HTTP responses. Defaults to +`-1` which means the size of the warning headers is unlimited. `http.tcp.keep_alive`:: -(<>) -Configures the `SO_KEEPALIVE` option for this socket, which -determines whether it sends TCP keepalive probes. -Defaults to `network.tcp.keep_alive`. +(<>, boolean) +Configures the `SO_KEEPALIVE` option for this socket, which determines whether +it sends TCP keepalive probes. Defaults to `network.tcp.keep_alive`. `http.tcp.keep_idle`:: -(<>) Configures the `TCP_KEEPIDLE` option for this socket, which -determines the time in seconds that a connection must be idle before -starting to send TCP keepalive probes. Defaults to `network.tcp.keep_idle`, which -uses the system default. This value cannot exceed `300` seconds. Only applicable on -Linux and macOS, and requires Java 11 or newer. +(<>, integer) +Configures the `TCP_KEEPIDLE` option for HTTP sockets, which determines the +time in seconds that a connection must be idle before starting to send TCP +keepalive probes. Defaults to `network.tcp.keep_idle`, which uses the system +default. This value cannot exceed `300` seconds. Only applicable on Linux and +macOS. `http.tcp.keep_interval`:: -(<>) Configures the `TCP_KEEPINTVL` option for this socket, -which determines the time in seconds between sending TCP keepalive probes. -Defaults to `network.tcp.keep_interval`, which uses the system default. -This value cannot exceed `300` seconds. Only applicable on Linux and macOS, and requires -Java 11 or newer. +(<>, integer) +Configures the `TCP_KEEPINTVL` option for HTTP sockets, which determines the +time in seconds between sending TCP keepalive probes. Defaults to +`network.tcp.keep_interval`, which uses the system default. This value cannot +exceed `300` seconds. Only applicable on Linux and macOS. `http.tcp.keep_count`:: -(<>) Configures the `TCP_KEEPCNT` option for this socket, which -determines the number of unacknowledged TCP keepalive probes that may be -sent on a connection before it is dropped. Defaults to `network.tcp.keep_count`, -which uses the system default. 
Only applicable on Linux and macOS, and -requires Java 11 or newer. +(<>, integer) +Configures the `TCP_KEEPCNT` option for HTTP sockets, which determines the +number of unacknowledged TCP keepalive probes that may be sent on a connection +before it is dropped. Defaults to `network.tcp.keep_count`, which uses the +system default. Only applicable on Linux and macOS. + +`http.tcp.no_delay`:: +(<>, boolean) +Configures the `TCP_NODELAY` option on HTTP sockets, which determines whether +{wikipedia}/Nagle%27s_algorithm[TCP no delay] is enabled. Defaults to `true`. `http.tcp.reuse_address`:: -(<>) -Should an address be reused or not. Defaults to `network.tcp.reuse_address`. +(<>, boolean) +Configures the `SO_REUSEADDR` option for HTTP sockets, which determines whether +the address can be reused or not. Defaults to `false` on Windows and `true` +otherwise. `http.tcp.send_buffer_size`:: -(<>) -The size of the TCP send buffer (specified with <>). -Defaults to `network.tcp.send_buffer_size`. +(<>, <>) +The size of the TCP send buffer for HTTP traffic. Defaults to +`network.tcp.send_buffer_size`. `http.tcp.receive_buffer_size`:: -(<>) -The size of the TCP receive buffer (specified with <>). -Defaults to `network.tcp.receive_buffer_size`. +(<>, <>) +The size of the TCP receive buffer for HTTP traffic. Defaults to +`network.tcp.receive_buffer_size`. `http.client_stats.enabled`:: -(<>) +(<>, boolean) Enable or disable collection of HTTP client stats. Defaults to `true`. `http.client_stats.closed_channels.max_count`:: -(<>) +(<>, integer) When `http.client_stats.enabled` is `true`, sets the maximum number of closed HTTP channels for which {es} reports statistics. Defaults to `10000`. `http.client_stats.closed_channels.max_age`:: -(<>) +(<>, <>) When `http.client_stats.enabled` is `true`, sets the maximum length of time after closing a HTTP channel that {es} will report that channel's statistics. Defaults to `5m`. diff --git a/docs/reference/modules/indices/index_management.asciidoc b/docs/reference/modules/indices/index_management.asciidoc index 1cf8fbaed064e..cdb8af570c6d4 100644 --- a/docs/reference/modules/indices/index_management.asciidoc +++ b/docs/reference/modules/indices/index_management.asciidoc @@ -15,7 +15,7 @@ features. // tag::action-destructive-requires-name-tag[] `action.destructive_requires_name` {ess-icon}:: (<>) -When set to `true`, you must specify the index name to <>. It is not possible to delete all indices with `_all` or use wildcards. +When set to `true`, you must specify the index name to <>. It is not possible to delete all indices with `_all` or use wildcards. Defaults to `true`. // end::action-destructive-requires-name-tag[] [[cluster-indices-close-enable]] diff --git a/docs/reference/modules/network.asciidoc b/docs/reference/modules/network.asciidoc index e1d6500e7749d..240decbcca4d9 100644 --- a/docs/reference/modules/network.asciidoc +++ b/docs/reference/modules/network.asciidoc @@ -38,7 +38,7 @@ proceeding. Most users will need to configure only the following network settings. `network.host`:: -(<>) +(<>, string) Sets the address of this node for both HTTP and transport traffic. The node will bind to this address and will also use it as its publish address. Accepts an IP address, a hostname, or a <>. @@ -46,7 +46,7 @@ an IP address, a hostname, or a <>. Defaults to `_local_`. `http.port`:: -(<>) +(<>, integer) The port to bind for HTTP client communication. Accepts a single value or a range. 
If a range is specified, the node will bind to the first available port in the range. @@ -54,7 +54,7 @@ in the range. Defaults to `9200-9300`. `transport.port`:: -(<>) +(<>, integer) The port to bind for communication between nodes. Accepts a single value or a range. If a range is specified, the node will bind to the first available port in the range. Set this setting to a single port, not a range, on every @@ -170,7 +170,7 @@ you should not use them if you can use the <> instead. `network.bind_host`:: -(<>) +(<>, string) The network address(es) to which the node should bind in order to listen for incoming connections. Accepts a list of IP addresses, hostnames, and <>. Defaults to the address given by @@ -178,7 +178,7 @@ incoming connections. Accepts a list of IP addresses, hostnames, and different addresses for publishing and binding. `network.publish_host`:: -(<>) +(<>, string) The network address that clients and other nodes can use to contact this node. Accepts an IP address, a hostname, or a <>. Defaults to the address given by `network.host`. Use this setting only @@ -199,53 +199,53 @@ each node is accessible at all possible publish addresses. Use the following settings to control the low-level parameters of the TCP connections used by the HTTP and transport interfaces. -`network.tcp.no_delay`:: -(<>) -Enable or disable the {wikipedia}/Nagle%27s_algorithm[TCP no delay] -setting. Defaults to `true`. - `network.tcp.keep_alive`:: -(<>) -Configures the `SO_KEEPALIVE` option for this socket, which -determines whether it sends TCP keepalive probes. +(<>, boolean) +Configures the `SO_KEEPALIVE` option for network sockets, which determines +whether each connection sends TCP keepalive probes. Defaults to `true`. `network.tcp.keep_idle`:: -(<>) -Configures the `TCP_KEEPIDLE` option for this socket, which -determines the time in seconds that a connection must be idle before -starting to send TCP keepalive probes. Defaults to `-1`, which uses -the system default. This value cannot exceed `300` seconds. Only applicable on Linux and macOS, -and requires Java 11 or newer. +(<>, integer) +Configures the `TCP_KEEPIDLE` option for network sockets, which determines the +time in seconds that a connection must be idle before starting to send TCP +keepalive probes. Defaults to `-1`, which means to use the system default. This +value cannot exceed `300` seconds. Only applicable on Linux and macOS. `network.tcp.keep_interval`:: -(<>) -Configures the `TCP_KEEPINTVL` option for this socket, -which determines the time in seconds between sending TCP keepalive probes. -Defaults to `-1`, which uses the system default. This value cannot exceed `300` seconds. -Only applicable on Linux and macOS, and requires Java 11 or newer. +(<>, integer) +Configures the `TCP_KEEPINTVL` option for network sockets, which determines the +time in seconds between sending TCP keepalive probes. Defaults to `-1`, which +means to use the system default. This value cannot exceed `300` seconds. Only +applicable on Linux and macOS. `network.tcp.keep_count`:: -(<>) -Configures the `TCP_KEEPCNT` option for this socket, which -determines the number of unacknowledged TCP keepalive probes that may be -sent on a connection before it is dropped. Defaults to `-1`, -which uses the system default. Only applicable on Linux and macOS, and requires -Java 11 or newer. 
+(<>, integer) +Configures the `TCP_KEEPCNT` option for network sockets, which determines the +number of unacknowledged TCP keepalive probes that may be sent on a connection +before it is dropped. Defaults to `-1`, which means to use the system default. +Only applicable on Linux and macOS. + +`network.tcp.no_delay`:: +(<>, boolean) +Configures the `TCP_NODELAY` option on network sockets, which determines +whether {wikipedia}/Nagle%27s_algorithm[TCP no delay] is enabled. Defaults to +`true`. `network.tcp.reuse_address`:: -(<>) -Should an address be reused or not. Defaults to `true` on non-windows -machines. +(<>, boolean) +Configures the `SO_REUSEADDR` option for network sockets, which determines +whether the address can be reused or not. Defaults to `false` on Windows and +`true` otherwise. `network.tcp.send_buffer_size`:: -(<>) -The size of the TCP send buffer (specified with <>). -By default not explicitly set. +(<>, <>) +Configures the size of the TCP send buffer for network sockets. Defaults to +`-1` which means to use the system default. `network.tcp.receive_buffer_size`:: -(<>) -The size of the TCP receive buffer (specified with <>). -By default not explicitly set. +(<>, <>) +Configures the size of the TCP receive buffer. Defaults to `-1` which means to +use the system default. include::http.asciidoc[] diff --git a/docs/reference/modules/node.asciidoc b/docs/reference/modules/node.asciidoc index fc5e241064ed5..372af89af4a8a 100644 --- a/docs/reference/modules/node.asciidoc +++ b/docs/reference/modules/node.asciidoc @@ -194,13 +194,6 @@ High availability (HA) clusters require at least three master-eligible nodes, at least two of which are not voting-only nodes. Such a cluster will be able to elect a master node even if one of the nodes fails. -Since voting-only nodes never act as the cluster's elected master, they may -require less heap and a less powerful CPU than the true master nodes. -However all master-eligible nodes, including voting-only nodes, require -reasonably fast persistent storage and a reliable and low-latency network -connection to the rest of the cluster, since they are on the critical path for -<>. - Voting-only master-eligible nodes may also fill other roles in your cluster. For instance, a node may be both a data node and a voting-only master-eligible node. A _dedicated_ voting-only master-eligible nodes is a voting-only @@ -212,6 +205,20 @@ dedicated voting-only master-eligible node, set: node.roles: [ master, voting_only ] ------------------- +Since dedicated voting-only nodes never act as the cluster's elected master, +they may require less heap and a less powerful CPU than the true master nodes. +However all master-eligible nodes, including voting-only nodes, are on the +critical path for <>. Cluster state updates are usually independent of +performance-critical workloads such as indexing or searches, but they are +involved in management activities such as index creation and rollover, mapping +updates, and recovery after a failure. The performance characteristics of these +activities are a function of the speed of the storage on each master-eligible +node, as well as the reliability and latency of the network interconnections +between the elected master node and the other nodes in the cluster. You must +therefore ensure that the storage and networking available to the nodes in your +cluster are good enough to meet your performance goals. 
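As noted earlier, a voting-only master-eligible node may also fill other roles in the cluster. A minimal sketch (not in the original text) of a node configured as both a data node and a voting-only master-eligible node would be:

[source,yaml]
----
node.roles: [ data, master, voting_only ]
----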
+ [[data-node]] ==== Data node @@ -457,7 +464,7 @@ The contents of the `path.data` directory must persist across restarts, because this is where your data is stored. {es} requires the filesystem to act as if it were backed by a local disk, but this means that it will work correctly on properly-configured remote block devices (e.g. a SAN) and remote filesystems -(e.g. NFS) as long the remote storage behaves no differently from local +(e.g. NFS) as long as the remote storage behaves no differently from local storage. You can run multiple {es} nodes on the same filesystem, but each {es} node must have its own data path. diff --git a/docs/reference/modules/transport.asciidoc b/docs/reference/modules/transport.asciidoc index d97c2d432fa92..3f5a2ceecad7f 100644 --- a/docs/reference/modules/transport.asciidoc +++ b/docs/reference/modules/transport.asciidoc @@ -7,7 +7,7 @@ independently of the <>. Use the settings>> to configure both interfaces together. `transport.host`:: -(<>) +(<>, string) Sets the address of this node for transport traffic. The node will bind to this address and will also use it as its transport publish address. Accepts an IP address, a hostname, or a <>. @@ -17,7 +17,7 @@ transport and HTTP interfaces. Defaults to the address given by `network.host`. `transport.bind_host`:: -(<>) +(<>, string) The network address(es) to which the node should bind in order to listen for incoming transport connections. Accepts a list of IP addresses, hostnames, and <>. Defaults to the address given by @@ -27,7 +27,7 @@ binding, and you also require different binding configurations for the transport and HTTP interfaces. `transport.publish_host`:: -(<>) +(<>, string) The network address at which the node can be contacted by other nodes. Accepts an IP address, a hostname, or a <>. Defaults to the address given by `transport.host` or `network.publish_host`. @@ -36,19 +36,19 @@ different addresses for publishing and binding, and you also require different binding configurations for the transport and HTTP interfaces. `transport.publish_port`:: -(<>) +(<>, integer) The port of the <>. Set this parameter only if you need the publish port to be different from `transport.port`. Defaults to the port assigned via `transport.port`. `transport.connect_timeout`:: -(<>) +(<>, <>) The connect timeout for initiating a new connection (in time setting format). Defaults to `30s`. `transport.compress`:: -(<>) +(<>, string) Set to `true`, `indexing_data`, or `false` to configure transport compression between nodes. The option `true` will compress all data. The option `indexing_data` will compress only the raw index data sent between nodes during @@ -56,72 +56,71 @@ ingest, ccr following (excluding bootstrap), and operations based shard recovery (excluding transferring lucene files). Defaults to `indexing_data`. `transport.compression_scheme`:: -(<>) +(<>, string) Configures the compression scheme for `transport.compress`. The options are `deflate` or `lz4`. If `lz4` is configured and the remote node has not been upgraded to a version supporting `lz4`, the traffic will be sent uncompressed. Defaults to `lz4`. -`transport.ping_schedule`:: -(<>) -Schedule a regular application-level ping message -to ensure that transport connections between nodes are kept alive. Defaults to -`5s` in the transport client and `-1` (disabled) elsewhere. 
It is preferable -to correctly configure TCP keep-alives instead of using this feature, because -TCP keep-alives apply to all kinds of long-lived connections and not just to -transport connections. - -`transport.tcp.no_delay`:: -(<>) -Enable or disable the {wikipedia}/Nagle%27s_algorithm[TCP no delay] -setting. Defaults to `network.tcp.no_delay`. - `transport.tcp.keep_alive`:: -(<>) -Configures the `SO_KEEPALIVE` option for this socket, which -determines whether it sends TCP keepalive probes. -Defaults to `network.tcp.keep_alive`. +(<>, boolean) +Configures the `SO_KEEPALIVE` option for transport sockets, which determines +whether they send TCP keepalive probes. Defaults to `network.tcp.keep_alive`. `transport.tcp.keep_idle`:: -(<>) -Configures the `TCP_KEEPIDLE` option for this socket, which -determines the time in seconds that a connection must be idle before -starting to send TCP keepalive probes. Defaults to `network.tcp.keep_idle` if set, -or the system default otherwise. -This value cannot exceed `300` seconds. In cases where the system default -is higher than `300`, the value is automatically lowered to `300`. Only applicable on -Linux and macOS, and requires Java 11 or newer. +(<>, integer) +Configures the `TCP_KEEPIDLE` option for transport sockets, which determines +the time in seconds that a connection must be idle before starting to send TCP +keepalive probes. Defaults to `network.tcp.keep_idle` if set, or the system +default otherwise. This value cannot exceed `300` seconds. In cases where the +system default is higher than `300`, the value is automatically lowered to +`300`. Only applicable on Linux and macOS. `transport.tcp.keep_interval`:: -(<>) -Configures the `TCP_KEEPINTVL` option for this socket, -which determines the time in seconds between sending TCP keepalive probes. -Defaults to `network.tcp.keep_interval` if set, or the system default otherwise. -This value cannot exceed `300` seconds. In cases where the system default is higher than `300`, -the value is automatically lowered to `300`. Only applicable on Linux and macOS, -and requires Java 11 or newer. +(<>, integer) +Configures the `TCP_KEEPINTVL` option for transport sockets, which determines +the time in seconds between sending TCP keepalive probes. Defaults to +`network.tcp.keep_interval` if set, or the system default otherwise. This value +cannot exceed `300` seconds. In cases where the system default is higher than +`300`, the value is automatically lowered to `300`. Only applicable on Linux +and macOS. `transport.tcp.keep_count`:: -(<>) -Configures the `TCP_KEEPCNT` option for this socket, which -determines the number of unacknowledged TCP keepalive probes that may be -sent on a connection before it is dropped. Defaults to `network.tcp.keep_count` -if set, or the system default otherwise. Only applicable on Linux and macOS, and -requires Java 11 or newer. +(<>, integer) +Configures the `TCP_KEEPCNT` option for transport sockets, which determines the +number of unacknowledged TCP keepalive probes that may be sent on a connection +before it is dropped. Defaults to `network.tcp.keep_count` if set, or the +system default otherwise. Only applicable on Linux and macOS. + +`transport.tcp.no_delay`:: +(<>, boolean) +Configures the `TCP_NODELAY` option on transport sockets, which determines +whether {wikipedia}/Nagle%27s_algorithm[TCP no delay] is enabled. Defaults to +`true`. `transport.tcp.reuse_address`:: -(<>) -Should an address be reused or not. Defaults to `network.tcp.reuse_address`. 
+(<>, boolean) +Configures the `SO_REUSEADDR` option for network sockets, which determines +whether the address can be reused or not. Defaults to +`network.tcp.reuse_address`. `transport.tcp.send_buffer_size`:: -(<>) -The size of the TCP send buffer (specified with <>). -Defaults to `network.tcp.send_buffer_size`. +(<>, <>) +The size of the TCP send buffer for transport traffic. Defaults to +`network.tcp.send_buffer_size`. `transport.tcp.receive_buffer_size`:: -(<>) -The size of the TCP receive buffer (specified with <>). -Defaults to `network.tcp.receive_buffer_size`. +(<>, <>) +The size of the TCP receive buffer for transport traffic. Defaults to +`network.tcp.receive_buffer_size`. + +`transport.ping_schedule`:: +(<>, <>) +Configures the time between sending application-level pings on all transport +connections to promptly detect when a transport connection has failed. Defaults +to `-1` meaning that application-level pings are not sent. You should use TCP +keepalives (see `transport.tcp.keep_alive`) instead of application-level pings +wherever possible. [[transport-profiles]] ===== Transport profiles diff --git a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc index 3fb6c11181a19..5d132ee90bcda 100644 --- a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc +++ b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc @@ -10,7 +10,7 @@ intersect a bounding box. [discrete] [[geo-bounding-box-query-ex]] ==== Example -Assume the following the following documents are indexed: +Assume the following documents are indexed: [source,console] -------------------------------------------------- diff --git a/docs/reference/query-dsl/geo-distance-query.asciidoc b/docs/reference/query-dsl/geo-distance-query.asciidoc index 5c1b0a1ecfc3f..5fc39a415acab 100644 --- a/docs/reference/query-dsl/geo-distance-query.asciidoc +++ b/docs/reference/query-dsl/geo-distance-query.asciidoc @@ -11,7 +11,7 @@ a given distance of a geopoint. [[geo-distance-query-ex]] ==== Example -Assume the following the following documents are indexed: +Assume the following documents are indexed: [source,console] -------------------------------------------------- diff --git a/docs/reference/query-dsl/multi-match-query.asciidoc b/docs/reference/query-dsl/multi-match-query.asciidoc index b27cdb4c548b4..a7db337004dcd 100644 --- a/docs/reference/query-dsl/multi-match-query.asciidoc +++ b/docs/reference/query-dsl/multi-match-query.asciidoc @@ -388,11 +388,12 @@ explanation: Also, accepts `analyzer`, `boost`, `operator`, `minimum_should_match`, `lenient` and `zero_terms_query`. -WARNING: The `cross_fields` type blends field statistics in a way that does -not always produce well-formed scores (for example scores can become -negative). As an alternative, you can consider the -<> query, which is also -term-centric but combines field statistics in a more robust way. +WARNING: The `cross_fields` type blends field statistics in a complex way that +can be hard to interpret. The score combination can even be incorrect, in +particular when some documents contain some of the search fields, but not all +of them. You should consider the +<> query as an alternative, +which is also term-centric but combines field statistics in a more robust way. 
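As an illustrative sketch only (the `first_name` and `last_name` fields here are hypothetical and not taken from the original text), a term-centric search using the `combined_fields` query instead of `cross_fields` might look like this:

[source,console]
----
GET /_search
{
  "query": {
    "combined_fields": {
      "query": "Will Smith",
      "fields": [ "first_name", "last_name" ],
      "operator": "and"
    }
  }
}
----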
[[cross-field-analysis]] ===== `cross_field` and analysis diff --git a/docs/reference/query-dsl/regexp-syntax.asciidoc b/docs/reference/query-dsl/regexp-syntax.asciidoc index 270f6fe79e662..28c9c882542c1 100644 --- a/docs/reference/query-dsl/regexp-syntax.asciidoc +++ b/docs/reference/query-dsl/regexp-syntax.asciidoc @@ -37,7 +37,38 @@ backslash or surround it with double quotes. For example: \\ # renders as a literal '\' "john@smith.com" # renders as 'john@smith.com' .... - + +[NOTE] +==== + +The backslash is an escape character in both JSON strings and regular +expressions. You need to escape both backslashes in a query, unless you use a +language client, which takes care of this. For example, the string `a\b` needs +to be indexed as `"a\\b"`: + +[source,console] +-------------------------------------------------- +PUT my-index-000001/_doc/1 +{ + "my_field": "a\\b" +} +-------------------------------------------------- + +This document matches the following `regexp` query: + +[source,console] +-------------------------------------------------- +GET my-index-000001/_search +{ + "query": { + "regexp": { + "my_field.keyword": "a\\\\.*" + } + } +} +-------------------------------------------------- +//TEST[continued] +==== [discrete] [[regexp-standard-operators]] diff --git a/docs/reference/release-notes.asciidoc b/docs/reference/release-notes.asciidoc index 892577f0c6726..aff526b9a4a7a 100644 --- a/docs/reference/release-notes.asciidoc +++ b/docs/reference/release-notes.asciidoc @@ -6,6 +6,8 @@ This section summarizes the changes in each release. +* <> +* <> * <> * <> * <> @@ -29,6 +31,8 @@ This section summarizes the changes in each release. -- +include::release-notes/8.4.2.asciidoc[] +include::release-notes/8.4.1.asciidoc[] include::release-notes/8.4.0.asciidoc[] include::release-notes/8.3.3.asciidoc[] include::release-notes/8.3.2.asciidoc[] diff --git a/docs/reference/release-notes/8.3.0.asciidoc b/docs/reference/release-notes/8.3.0.asciidoc index a60e66e63239c..fd74c19046d81 100644 --- a/docs/reference/release-notes/8.3.0.asciidoc +++ b/docs/reference/release-notes/8.3.0.asciidoc @@ -1,8 +1,6 @@ [[release-notes-8.3.0]] == {es} version 8.3.0 -coming[8.3.0] - Also see <>. [[bug-8.3.0]] diff --git a/docs/reference/release-notes/8.4.0.asciidoc b/docs/reference/release-notes/8.4.0.asciidoc index fa51fbc255656..e83f9fb834723 100644 --- a/docs/reference/release-notes/8.4.0.asciidoc +++ b/docs/reference/release-notes/8.4.0.asciidoc @@ -1,8 +1,6 @@ [[release-notes-8.4.0]] == {es} version 8.4.0 -coming[8.4.0] - Also see <>. [[bug-8.4.0]] @@ -10,10 +8,16 @@ Also see <>. 
=== Bug fixes Aggregations:: +* Fix multi-value handling in composite agg {es-pull}88638[#88638] +* Fix: extract matrix stats using `bucket_selector` `buckets_path` {es-pull}88271[#88271] (issue: {es-issue}87454[#87454]) * Make the metric in the `buckets_path` parameter optional {es-pull}87220[#87220] (issue: {es-issue}72983[#72983]) +* Propagate alias filters to significance aggs filters {es-pull}88221[#88221] (issue: {es-issue}81585[#81585]) Allocation:: * Clamp auto-expand replicas to the closest value {es-pull}87505[#87505] (issue: {es-issue}84788[#84788]) +* Prevent re-balancing using outdated node weights in some cases {es-pull}88385[#88385] (issue: {es-issue}88384[#88384]) +* Remove any existing `read_only_allow_delete` index blocks when `cluster.routing.allocation.disk.threshold_enabled` is set to `false` {es-pull}87841[#87841] (issue: {es-issue}86383[#86383]) +* Replace health request with a state observer {es-pull}88641[#88641] Authentication:: * Fix unique realm name check to cover default realms {es-pull}87999[#87999] @@ -22,12 +26,30 @@ Authorization:: * Add rollover permissions for `remote_monitoring_agent` {es-pull}87717[#87717] (issue: {es-issue}84161[#84161]) Autoscaling:: +* Autoscaling during shrink {es-pull}88292[#88292] (issue: {es-issue}85480[#85480]) * Do not include desired nodes in snapshots {es-pull}87695[#87695] +Cluster Coordination:: +* Improve rejection of ambiguous voting config name {es-pull}89239[#89239] + +Data streams:: +* Fix renaming data streams with CCR replication {es-pull}88875[#88875] (issue: {es-issue}81751[#81751]) + +Distributed:: +* Fixed NullPointerException on bulk request {es-pull}88385[#88385] + EQL:: * Avoid attempting PIT close on PIT open failure {es-pull}87498[#87498] +* Improve EQL Sequence circuit breaker precision {es-pull}88538[#88538] (issue: {es-issue}88300[#88300]) + +Geo:: +* Geo_line aggregation returns a geojson point when the resulting line has only one point {es-pull}89199[#89199] (issue: {es-issue}85748[#85748]) +* Sort ranges in `geo_distance` aggregation {es-pull}89154[#89154] (issue: {es-issue}89147[#89147]) Health:: +* Fix NPE when checking if the last snapshot was success {es-pull}88811[#88811] +* Fixing a version check for master stability functionality {es-pull}89322[#89322] +* Fixing internal action names {es-pull}89182[#89182] * Using the correct connection to fetch remote master history {es-pull}87299[#87299] Highlighting:: @@ -36,59 +58,112 @@ Highlighting:: ILM+SLM:: * Batch ILM move to retry step task update {es-pull}86759[#86759] +Infra/CLI:: +* Quote paths with whitespace in Windows service CLIs {es-pull}89072[#89072] (issue: {es-issue}89043[#89043]) + Infra/Core:: +* Always close directory streams {es-pull}88560[#88560] +* Delete invalid settings for system indices {es-pull}88903[#88903] (issue: {es-issue}88324[#88324]) * Disallow three-digit minor and revision versions {es-pull}87338[#87338] +* Handle snapshot restore in file settings {es-pull}89321[#89321] (issue: {es-issue}89183[#89183]) +* System indices ignore all user templates {es-pull}87260[#87260] (issues: {es-issue}42508[#42508], {es-issue}74271[#74271]) -Ingest:: -* Don't ignore pipeline for upserts in bulk api {es-pull}87719[#87719] (issue: {es-issue}87131[#87131]) -* Geoip processor should respect the `ignore_missing` in case of missing database {es-pull}87793[#87793] (issue: {es-issue}87345[#87345]) +Infra/Node Lifecycle:: +* Fix message for stalled shutdown {es-pull}89254[#89254] + +Infra/Plugins:: +* Disable URL connection caching in 
SPIClassIterator {es-pull}88586[#88586] (issue: {es-issue}88275[#88275]) + +Infra/Scripting:: +* Script: `UpdateByQuery` can read doc version if requested {es-pull}88740[#88740] Machine Learning:: -* Improve trained model stats API performance {es-pull}87978[#87978] +* Address potential bug where trained models get stuck in starting after being allocated to node {es-pull}88945[#88945] +* Fix BERT and MPNet tokenization bug when handling unicode accents {es-pull}88907[#88907] (issue: {es-issue}88900[#88900]) +* Fix NLP `question_answering` task when best answer is only one token {es-pull}88347[#88347] +* Include start params in `_stats` for non-started model deployments {es-pull}89091[#89091] +* Fix minor tokenization bug when using fill_mask task with roberta tokenizer {es-pull}88825[#88825] +* Fix potential cause of classification and regression job failures {ml-pull}2385[#2385] + +Mapping:: +* Assign the right path to objects merged when parsing mappings {es-pull}89389[#89389] (issue: {es-issue}88573[#88573]) +* Don't modify source map when parsing composite runtime field {es-pull}89114[#89114] + +Network:: +* Ensure that the extended socket options TCP_KEEPXXX are available {es-pull}88935[#88935] (issue: {es-issue}88897[#88897]) SQL:: +* Fix `SqlSearchIT` `testAllTypesWithRequestToOldNodes` {es-pull}88883[#88883] (issue: {es-issue}88866[#88866]) * Fix date range checks {es-pull}87151[#87151] (issue: {es-issue}77179[#77179]) +* fix object equals {es-pull}87887[#87887] + +Search:: +* Fix: use status code 500 for aggregation reduce phase errors if no shard failed {es-pull}88551[#88551] (issue: {es-issue}20004[#20004]) +* Override bulk visit methods of exitable point visitor {es-pull}82120[#82120] + +Security:: +* Ensure `secureString` remain open when reloading secure settings {es-pull}88922[#88922] Snapshot/Restore:: -* Use the provided SAS token without SDK sanitation that can produce invalid signatures {es-pull}88155[#88155] (issue: {es-issue}88140[#88140]) +* Fix queued snapshot assignments after partial snapshot fails due to delete {es-pull}88470[#88470] (issue: {es-issue}86724[#86724]) Transform:: -* Execute `_refresh` separately from DBQ, with system permissions {es-pull}88005[#88005] (issue: {es-issue}88001[#88001]) +* Handle update error correctly {es-pull}88619[#88619] + +[[deprecation-8.4.0]] +[float] +=== Deprecations + +Vector Search:: +* Deprecate the `_knn_search` endpoint {es-pull}88828[#88828] [[enhancement-8.4.0]] [float] === Enhancements Aggregations:: +* Adding cardinality support for `random_sampler` agg {es-pull}86838[#86838] * Minor `RangeAgg` optimization {es-pull}86935[#86935] (issue: {es-issue}84262[#84262]) * Speed counting filters/range/date_histogram aggs {es-pull}81322[#81322] * Update bucket metric pipeline agg paths to allow intermediate single bucket and bucket qualified multi-bucket aggs {es-pull}85729[#85729] Allocation:: * Add debug information to `ReactiveReason` about assigned and unassigned shards {es-pull}86132[#86132] (issue: {es-issue}85243[#85243]) +* Optimize log cluster health performance. 
{es-pull}87723[#87723] * Use desired nodes during data tier allocation decisions {es-pull}87735[#87735] Audit:: +* Audit API key ID when create or grant API keys {es-pull}88456[#88456] +* Include API key metadata in audit log when an API key is created, granted, or updated {es-pull}88642[#88642] +* Updatable API keys - logging audit trail event {es-pull}88276[#88276] * User Profile - audit support for security domain {es-pull}87097[#87097] +Authentication:: +* If signature validation fails, reload JWKs and retry if new JWKs are found {es-pull}88023[#88023] + Authorization:: * App permissions with action patterns do not retrieve privileges {es-pull}85455[#85455] * Cancellable Profile Has Privilege check {es-pull}87224[#87224] * Return action denied error when user with insufficient privileges (`manage_own_api_key`) attempts a grant API key request {es-pull}87461[#87461] (issue: {es-issue}87438[#87438]) +* Update indices permissions to Enterprise Search service account {es-pull}88703[#88703] Autoscaling:: * Add processors to autoscaling capacity response {es-pull}87895[#87895] * Keep track of desired nodes status in cluster state {es-pull}87474[#87474] Cluster Coordination:: +* Deduplicate mappings in persisted cluster state {es-pull}88479[#88479] * Expose segment details in PCSS debug log {es-pull}87412[#87412] +* Periodic warning for 1-node cluster w/ seed hosts {es-pull}88013[#88013] (issue: {es-issue}85222[#85222]) * Report overall mapping size in cluster stats {es-pull}87556[#87556] Data streams:: * Give doc-value-only mappings to numeric fields on metrics templates {es-pull}87100[#87100] Distributed:: +* Adding the ability to register a `PeerFinderListener` to Coordinator {es-pull}88626[#88626] * Make Desired Nodes API operator-only {es-pull}87778[#87778] (issue: {es-issue}87777[#87777]) +* Support "dry run" mode for updating Desired Nodes {es-pull}88305[#88305] FIPS:: * Log warning when hash function used by cache is not recommended in FIPS mode {es-pull}86740[#86740] @@ -96,17 +171,26 @@ FIPS:: Geo:: * Optimize geogrid aggregations for singleton points {es-pull}87439[#87439] +* Support cartesian shape with doc values {es-pull}88487[#88487] * Use a faster but less accurate log algorithm for computing Geotile Y coordinate {es-pull}87515[#87515] +* Use faster maths to project WGS84 to mercator {es-pull}88231[#88231] Health:: +* Add health user action for unhealthy SLM policy failure counts {es-pull}88523[#88523] * Adding a transport action to get cluster formation info {es-pull}87306[#87306] * Adding additional capability to the `master_is_stable` health indicator service {es-pull}87482[#87482] * Creating a transport action for the `CoordinationDiagnosticsService` {es-pull}87984[#87984] * Move the master stability logic into its own service separate from the `HealthIndicatorService` {es-pull}87672[#87672] +* Polling cluster formation state for master-is-stable health indicator {es-pull}88397[#88397] * Remove cluster block preflight check from health api {es-pull}87520[#87520] (issue: {es-issue}87464[#87464]) +ILM+SLM:: +* Add min_* conditions to rollover {es-pull}83345[#83345] +* Track the count of failed invocations since last successful policy snapshot {es-pull}88398[#88398] + Infra/Core:: * Improve console exception messages {es-pull}87942[#87942] +* Print full exception when console is non-interactive {es-pull}88297[#88297] * Stop making index read-only when executing force merge index lifecycle management action {es-pull}81162[#81162] (issue: {es-issue}81162[#81162]) * Stream input 
and output support for optional collections {es-pull}88127[#88127] * Update version of internal http client {es-pull}87491[#87491] @@ -114,6 +198,13 @@ Infra/Core:: Infra/Logging:: * Catch an exception when formatting a string fails {es-pull}87132[#87132] +Infra/Scripting:: +* Script: Add Metadata to ingest context {es-pull}87309[#87309] +* Script: Metadata for update context {es-pull}88333[#88333] + +Infra/Settings:: +* Convert disk watermarks to RelativeByteSizeValues {es-pull}88719[#88719] + Ingest:: * Allow pipeline processor to ignore missing pipelines {es-pull}87354[#87354] * Move the ingest attachment processor to the default distribution {es-pull}87989[#87989] @@ -122,10 +213,21 @@ Ingest:: Machine Learning:: * Add authorization info to ML config listings {es-pull}87884[#87884] +* Add deployed native models to `inference_stats` in trained model stats response {es-pull}88187[#88187] +* Add inference cache hit count to inference node stats {es-pull}88807[#88807] +* Add new `cache_size` parameter to `trained_model` deployments API {es-pull}88450[#88450] * Expand allowed NER labels to be any I-O-B tagged labels {es-pull}87091[#87091] * Improve scalability of NLP models {es-pull}87366[#87366] +* Indicate overall deployment failure if all node routes are failed {es-pull}88378[#88378] +* New `frequent_items` aggregation {es-pull}83055[#83055] +* Fairer application of size penalty for model selection for training classification and regression models {ml-pull}2291[#2291] +* Accelerate training for data frame analytics by skipping fine parameter tuning if it is unnecessary {ml-pull}2298[#2298] +* Address some causes of high runtimes training regression and classification models on large data sets with many features {ml-pull}2332[#2332] +* Add caching for PyTorch inference {ml-pull}2305[#2305] +* Improve accuracy of anomaly detection median estimation {ml-pull}2367[#2367] (issue: {ml-issue}2364[#2364]) Mapping:: +* Enable synthetic source support on constant keyword fields {es-pull}88603[#88603] * Speed up `NumberFieldMapper` {es-pull}85688[#85688] Monitoring:: @@ -133,9 +235,7 @@ Monitoring:: Network:: * Allow start cluster with unreachable remote clusters {es-pull}87298[#87298] - -Performance:: -* Warn about impact of large readahead on search {es-pull}88007[#88007] +* Increase `http.max_header_size` default to 16kb {es-pull}88725[#88725] (issue: {es-issue}88501[#88501]) Query Languages:: * Add support for VERSION field type in SQL and EQL {es-pull}87590[#87590] (issue: {es-issue}83375[#83375]) @@ -145,18 +245,33 @@ Rollup:: SQL:: * Implement support for partial search results in SQL CLI {es-pull}86982[#86982] (issue: {es-issue}86082[#86082]) +* Update Tableau connector to use connection dialog v2 {es-pull}88462[#88462] Search:: * Add mapping stats for indexed `dense_vectors` {es-pull}86859[#86859] +* Improve error when sorting on incompatible types {es-pull}88399[#88399] (issue: {es-issue}73146[#73146]) +* Support kNN vectors in disk usage action {es-pull}88785[#88785] (issue: {es-issue}84801[#84801]) Security:: -* Automatically close idle connections in OIDC back-channel {es-pull}87773[#87773] +* Add setting for `tcp_keepalive` for oidc back-channel {es-pull}87868[#87868] +* Support `run_as` another user when granting API keys {es-pull}88335[#88335] * Support exists query for API key query {es-pull}87229[#87229] +* Updatable API keys - REST API spec and tests {es-pull}88270[#88270] +* Updatable API keys - noop check {es-pull}88346[#88346] Snapshot/Restore:: +* INFO logging of snapshot 
restore and completion {es-pull}88257[#88257] (issue: {es-issue}86610[#86610]) * Make snapshot deletes not block the repository during data blob deletes {es-pull}86514[#86514] +* Retry after all S3 get failures that made progress {es-pull}88015[#88015] (issue: {es-issue}87243[#87243]) +* Speed up creating new `IndexMetaDataGenerations` without removed snapshots {es-pull}88344[#88344] * Update HDFS Repository to HDFS 3.3.3 {es-pull}88039[#88039] +Stats:: +* Sort ingest pipeline stats by use {es-pull}88035[#88035] + +TLS:: +* Add issuer to GET _ssl/certificates {es-pull}88445[#88445] + Transform:: * Add authorization info to transform config listings {es-pull}87570[#87570] * Implement per-transform num_failure_retries setting {es-pull}87361[#87361] @@ -165,16 +280,38 @@ Transform:: [float] === New features +Authentication:: +* Support updates of API key attributes (single operation route) {es-pull}88186[#88186] + Health:: * Master stability health indicator part 1 (when a master has been seen recently) {es-pull}86524[#86524] +* Remove help_url,rename summary to symptom, and `user_actions` to diagnosis {es-pull}88553[#88553] (issue: {es-issue}88474[#88474]) + +Infra/Core:: +* File Settings Service {es-pull}88329[#88329] Infra/Logging:: * Stable logging API - the basic use case {es-pull}86612[#86612] +Machine Learning:: +* Make composite aggs in datafeeds Generally Available {es-pull}88589[#88589] + +Search:: +* Add 'mode' option to `_source` field mapper {es-pull}88211[#88211] + +TSDB:: +* TSDB: Implement downsampling ILM Action for time-series indices {es-pull}87269[#87269] (issue: {es-issue}68609[#68609]) + +Vector Search:: +* Integrate ANN into `_search` endpoint {es-pull}88694[#88694] (issue: {es-issue}87625[#87625]) + [[upgrade-8.4.0]] [float] === Upgrades +Infra/Core:: +* Upgrade to Log4J 2.18.0 {es-pull}88237[#88237] + Network:: * Upgrade to Netty 4.1.77 {es-pull}86630[#86630] diff --git a/docs/reference/release-notes/8.4.1.asciidoc b/docs/reference/release-notes/8.4.1.asciidoc new file mode 100644 index 0000000000000..aa8e9b7ccf6bc --- /dev/null +++ b/docs/reference/release-notes/8.4.1.asciidoc @@ -0,0 +1,13 @@ +[[release-notes-8.4.1]] +== {es} version 8.4.1 + +Also see <>. + +[[bug-8.4.1]] +[float] +=== Bug fixes + +Machine Learning:: +* [ML] Validate trained model deployment `queue_capacity` limit {es-pull}89611[#89611] (issue: {es-issue}89555[#89555]) + + diff --git a/docs/reference/release-notes/8.4.2.asciidoc b/docs/reference/release-notes/8.4.2.asciidoc new file mode 100644 index 0000000000000..e74ce8470137a --- /dev/null +++ b/docs/reference/release-notes/8.4.2.asciidoc @@ -0,0 +1,69 @@ +[[release-notes-8.4.2]] +== {es} version 8.4.2 + +Also see <>. 
+ +[[bug-8.4.2]] +[float] +=== Bug fixes + +Allocation:: +* Fix debug mode in `MaxRetryAllocationDecider` {es-pull}89973[#89973] + +Authentication:: +* Fix double sending of response in `TransportOpenIdConnectPrepareAuthenticationAction` {es-pull}89930[#89930] + +Autoscaling:: +* Fix issue with autoscaling after a clone or split {es-pull}89768[#89768] (issue: {es-issue}89758[#89758]) + +Health:: +* Fix the conditions for fetching remote master history {es-pull}89472[#89472] (issue: {es-issue}89431[#89431]) + +ILM+SLM:: +* Copy `isHidden` during ILM alias swap {es-pull}89650[#89650] (issue: {es-issue}89604[#89604]) + +Infra/Core:: +* Extend the date rounding logic to be conditional {es-pull}89693[#89693] (issues: {es-issue}89096[#89096], {es-issue}58986[#58986]) +* Fix `FileSettingsService` hang on error update {es-pull}89630[#89630] +* Implement fix to terminate file Watcher thread to avoid deadlock {es-pull}89934[#89934] + +Ingest Node:: +* Fix pipeline `id` not present in ingest metadata inside `on_failure` block {es-pull}89632[#89632] + +Machine Learning:: +* Fix memory leak in `TransportDeleteExpiredDataAction` {es-pull}89935[#89935] +* Do not retain categorization tokens when existing category matches {ml-pull}2398[#2398] + +Network:: +* Fix memory leak when double invoking `RestChannel.sendResponse` {es-pull}89873[#89873] + +Ranking:: +* Avoid negative scores with `cross_fields` type {es-pull}89016[#89016] (issue: {es-issue}44700[#44700]) + +Rollup:: +* Fork `TransportRollupCapsAction` to MANAGEMENT POOL {es-pull}89803[#89803] + +Search:: +* Empty intervals needs to start in position -1 {es-pull}89962[#89962] (issue: {es-issue}89789[#89789]) + +Transform:: +* Scheduler concurrency fix {es-pull}89716[#89716] (issue: {es-issue}88991[#88991]) + +[[enhancement-8.4.2]] +[float] +=== Enhancements + +Allocation:: +* Log unsuccessful attempts to get credentials from web identity tokens {es-pull}88241[#88241] + +Health:: +* Add delayed allocation diagnosis case to shards availability indicator {es-pull}89056[#89056] + +[[upgrade-8.4.2]] +[float] +=== Upgrades + +Packaging:: +* Update OpenJDK to 18.0.2.1 {es-pull}89535[#89535] (issue: {es-issue}89531[#89531]) + + diff --git a/docs/reference/release-notes/highlights.asciidoc b/docs/reference/release-notes/highlights.asciidoc index 87a5f6420252e..869c8ad839fd4 100644 --- a/docs/reference/release-notes/highlights.asciidoc +++ b/docs/reference/release-notes/highlights.asciidoc @@ -1,8 +1,6 @@ [[release-highlights]] == What's new in {minor-version} -coming::[{minor-version}] - Here are the highlights of what's new and improved in {es} {minor-version}! ifeval::[\{release-state}\"!=\"unreleased\"] For detailed information about this release, see the <> and @@ -17,13 +15,95 @@ Other versions: | {ref-bare}/8.1/release-highlights.html[8.1] | {ref-bare}/8.0/release-highlights.html[8.0] -// The notable-highlights tag marks entries that -// should be featured in the Stack Installation and Upgrade Guide: // tag::notable-highlights[] -// [discrete] -// === Heading -// -// Description. + +[discrete] +[[speed_up_filters_range_date_histogram_aggs]] +=== Speed up filters/range/date_histogram aggs +This speeds up a few aggregations when they don't have child aggregations. +That's super common, for example, the histogram at the top of Kibana's +discover tab is a `date_histogram` without any child aggregations. That +particular aggregation is sped up by about 85% in our rally tests, dropping +from 250ms to 30ms. 
+ +{es-pull}81322[#81322] + +[discrete] +[[minimum_conditions_for_rollover_api_ilm_actions]] +=== Minimum conditions for the rollover API and ILM actions +The rollover API and ILM actions now support minimum conditions for rollover. + +Minimum conditions prevent rollover from occuring until they are met. That is, an index +will rollover once one or more max conditions are satisfied and all min conditions are satisfied. + +As an example, the following ILM policy would roll an index over if it is at least 7 days old or +at least 100 gigabytes, but only as long as the index is not empty. + +[source,console] +---- +PUT _ilm/policy/my_policy +{ + "policy": { + "phases": { + "hot": { + "actions": { + "rollover" : { + "max_age": "7d", + "max_size": "100gb", + "min_docs": 1 + } + } + } + } + } +} +---- + +{es-pull}83345[#83345] + +[discrete] +[[infinite_adaptive_retries_for_transforms]] +=== Infinite and adaptive retries for transforms +Infinite and adaptive retries – available in 8.4 – makes it possible for +transforms to recover after a failure without any user intervention. Retries +can be configured per transform. The transform retries become less frequent +progressively. The interval between retries doubles after reaching a one-hour +threshold. This is because the possibility that retries solve the problem is +less likely after each failed retry. + +In the *Transforms* page in *{stack-manage-app}* in {kib}, the number of retries +can be configured when creating a new transform or editing an existing one. + +{es-pull}87361[#87361] + +[discrete] +[[composite_aggregations_in_datafeeds_are_generally_available]] +=== Composite aggregations in datafeeds are Generally Available +The support for +{ml-docs}/ml-configuring-aggregation.html#aggs-using-composite[composite aggregations] +in datafeeds is now generally available. + +[discrete] +[[early-stopping-dfa]] +=== Optimizing speed of {dfanalytics} +{dfanalytics-cap} is even faster in 8.4. The new function automatically +stops the process of hyperparameter optimization early in case the +accuracy gain for a different set of hyperparameter values would be +insignificant. The early stopping of the optimization process results in a +shorter runtime for the {dfanalytics-job}. + +{es-pull}88589[#88589] + +[discrete] +[[integrate_ann_into_search_endpoint]] +=== Integrate ANN into `_search` endpoint +This change adds a `knn` option to the `_search` API to support ANN +search. It's powered by the same Lucene ANN capabilities as the old +`_knn_search` endpoint. The `knn` option can be combined with other +search features like queries and aggregations. + +{es-pull}88694[#88694] + // end::notable-highlights[] diff --git a/docs/reference/rest-api/common-parms.asciidoc b/docs/reference/rest-api/common-parms.asciidoc index e12590e18106c..4513b351bf229 100644 --- a/docs/reference/rest-api/common-parms.asciidoc +++ b/docs/reference/rest-api/common-parms.asciidoc @@ -313,9 +313,8 @@ end::http-format[] tag::frequency[] The interval between checks for changes in the source indices when the -{transform} is running continuously. Also determines the retry interval in the -event of transient failures while the {transform} is searching or indexing. The -minimum value is `1s` and the maximum is `1h`. The default value is `1m`. +{transform} is running continuously. The minimum value is `1s` and the maximum +is `1h`. The default value is `1m`. 
end::frequency[] tag::from[] @@ -730,6 +729,9 @@ The following groupings are currently supported: * <<_histogram,Histogram>> * <<_terms,Terms>> +The grouping properties can optionally have a `missing_bucket` property. If +it's `true`, documents without a value in the respective `group_by` field are +included. Defaults to `false`. -- end::pivot-group-by[] @@ -1004,11 +1006,14 @@ criteria is deleted from the destination index. end::transform-retention[] tag::transform-retention-time[] -Specifies that the {transform} uses a time field to set the retention policy. +Specifies that the {transform} uses a time field to set the retention policy. +Data is deleted if `time.field` for the retention policy exists and contains +data older than `max.age`. end::transform-retention-time[] tag::transform-retention-time-field[] -The date field that is used to calculate the age of the document. +The date field that is used to calculate the age of the document. Set +`time.field` to an existing date field. end::transform-retention-time-field[] tag::transform-retention-time-max-age[] diff --git a/docs/reference/search.asciidoc b/docs/reference/search.asciidoc index 4dad5ec48c9dc..3a147e6ad9bc0 100644 --- a/docs/reference/search.asciidoc +++ b/docs/reference/search.asciidoc @@ -15,7 +15,6 @@ exception of the <>. * <> * <> * <> -* <> * <> * <> * <> diff --git a/docs/reference/search/knn-search.asciidoc b/docs/reference/search/knn-search.asciidoc index a4b1f9c00b3be..e37fec4612ce0 100644 --- a/docs/reference/search/knn-search.asciidoc +++ b/docs/reference/search/knn-search.asciidoc @@ -4,8 +4,8 @@ kNN search ++++ +deprecated::[8.4.0,"The kNN search API has been replaced by the <<<> in the search API."] experimental::[] - Performs a k-nearest neighbor (kNN) search and returns the matching documents. //// @@ -46,6 +46,7 @@ GET my-index/_knn_search } ---- // TEST[continued] +// TEST[warning:The kNN search API has been replaced by the `knn` option in the search API.] 
[[knn-search-api-request]] ==== {api-request-title} diff --git a/docs/reference/search/search-your-data/ccs-version-compat-matrix.asciidoc b/docs/reference/search/search-your-data/ccs-version-compat-matrix.asciidoc index 8d651a62002d7..42e11dc63b18e 100644 --- a/docs/reference/search/search-your-data/ccs-version-compat-matrix.asciidoc +++ b/docs/reference/search/search-your-data/ccs-version-compat-matrix.asciidoc @@ -1,12 +1,13 @@ -[cols="^,^,^,^,^,^,^"] +[cols="^,^,^,^,^,^,^,^"] |==== -| 6+^h| Remote cluster version +| 7+^h| Remote cluster version h| Local cluster version - | 6.8 | 7.1–7.16 | 7.17 | 8.0 | 8.1 | 8.2 -| 6.8 | {yes-icon} | {yes-icon} | {yes-icon} | {no-icon} | {no-icon} | {no-icon} -| 7.1–7.16 | {yes-icon} | {yes-icon} | {yes-icon} | {no-icon} | {no-icon} | {no-icon} -| 7.17 | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.0 | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.1 | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.2 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} -|==== + | 6.8 | 7.1–7.16 | 7.17 | 8.0 | 8.1 | 8.2 | 8.3 +| 6.8 | {yes-icon} | {yes-icon} | {yes-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} +| 7.1–7.16 | {yes-icon} | {yes-icon} | {yes-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} +| 7.17 | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.0 | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.1 | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.2 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.3 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} +|==== \ No newline at end of file diff --git a/docs/reference/search/search-your-data/knn-search.asciidoc b/docs/reference/search/search-your-data/knn-search.asciidoc index c16fbff4cff41..dfa53d6ffbf3c 100644 --- a/docs/reference/search/search-your-data/knn-search.asciidoc +++ b/docs/reference/search/search-your-data/knn-search.asciidoc @@ -279,41 +279,14 @@ POST image-index/_search ---- // TEST[continued] -[source,console-result] ----- -{ - "took": 5, - "timed_out": false, - "_shards": { - "total": 1, - "successful": 1, - "skipped": 0, - "failed": 0 - }, - "hits": { - "total": { - "value": 1, - "relation": "eq" - }, - "max_score": 0.003144654, - "hits": [ - { - "_index": "image-index", - "_id": "2", - "_score": 0.003144654, - "fields": { - "title": ["alpine lake"] - } - } - ] - } -} ----- -// TESTRESPONSE[s/"took": 5/"took": $body.took/] -// TESTRESPONSE[s/,\n \.\.\.//] +NOTE: The filter is applied **during** the approximate kNN search to ensure +that `k` matching documents are returned. This contrasts with a +post-filtering approach, where the filter is applied **after** the approximate +kNN search completes. Post-filtering has the downside that it sometimes +returns fewer than k results, even when there are enough matching documents. [discrete] -==== Combine approximate kNN and a query +==== Combine approximate kNN with other features You can perform 'hybrid retrieval' by providing both the <> and a <>: @@ -354,6 +327,11 @@ each score in the sum. In the example above, the scores will be calculated as score = 0.9 * match_score + 0.1 * knn_score ``` +The `knn` option can also be used with <>. 
In general, {es} computes aggregations +over all documents that match the search. So for approximate kNN search, aggregations are calculated on the top `k` +nearest documents. If the search also includes a `query`, then aggregations are calculated on the combined set of `knn` +and `query` matches. + [discrete] [[knn-indexing-considerations]] ==== Indexing considerations diff --git a/docs/reference/search/search-your-data/paginate-search-results.asciidoc b/docs/reference/search/search-your-data/paginate-search-results.asciidoc index 931e6cffc6675..1f357f6fe500b 100644 --- a/docs/reference/search/search-your-data/paginate-search-results.asciidoc +++ b/docs/reference/search/search-your-data/paginate-search-results.asciidoc @@ -46,7 +46,112 @@ You can use the `search_after` parameter to retrieve the next page of hits using a set of <> from the previous page. Using `search_after` requires multiple search requests with the same `query` and -`sort` values. If a <> occurs between these requests, +`sort` values. The first step is to run an initial request. The following +example sorts the results by two fields (`date` and `tie_breaker_id`): + + +//// +[source,console] +-------------------------------------------------- +PUT twitter +{ + "mappings": { + "properties": { + "tie_breaker_id": { + "type": "keyword" + }, + "date": { + "type": "date" + } + } + } +} +-------------------------------------------------- +//// + +[source,console] +-------------------------------------------------- +GET twitter/_search +{ + "query": { + "match": { + "title": "elasticsearch" + } + }, + "sort": [ + {"date": "asc"}, + {"tie_breaker_id": "asc"} <1> + ] +} +-------------------------------------------------- +//TEST[continued] + +<1> A copy of the `_id` field with `doc_values` enabled + +The search response includes an array of `sort` values for each hit: + +[source,console-result] +---- +{ + "took" : 17, + "timed_out" : false, + "_shards" : ..., + "hits" : { + "total" : ..., + "max_score" : null, + "hits" : [ + ... + { + "_index" : "twitter", + "_id" : "654322", + "_score" : null, + "_source" : ..., + "sort" : [ + 1463538855, + "654322" + ] + }, + { + "_index" : "twitter", + "_id" : "654323", + "_score" : null, + "_source" : ..., + "sort" : [ <1> + 1463538857, + "654323" + ] + } + ] + } +} +---- +// TESTRESPONSE[skip: demo of where the sort values are] + +<1> Sort values for the last returned hit. + +To retrieve the next page of results, repeat the request, take the `sort` values from the +last hit, and insert those into the `search_after` array: + +[source,console] +-------------------------------------------------- +GET twitter/_search +{ + "query": { + "match": { + "title": "elasticsearch" + } + }, + "search_after": [1463538857, "654323"], + "sort": [ + {"date": "asc"}, + {"tie_breaker_id": "asc"} + ] +} +-------------------------------------------------- +//TEST[continued] + +Repeat this process by updating the `search_after` array every time you retrieve a +new page of results. If a <> occurs between these requests, the order of your results may change, causing inconsistent results across pages. To prevent this, you can create a <> to preserve the current index state over your searches. 
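A point in time (PIT) pairs naturally with `search_after`. As a minimal sketch, assuming the `twitter` index from the example above: first open a PIT, then pass the returned `id` in the search body together with the `search_after` values. A search that uses a PIT targets `_search` directly rather than naming the index, and the `id` below is a placeholder for the value returned by the first request.

[source,console]
----
POST /twitter/_pit?keep_alive=1m
----

[source,console]
----
GET /_search
{
  "query": {
    "match": {
      "title": "elasticsearch"
    }
  },
  "pit": {
    "id": "<id returned by the PIT request>",
    "keep_alive": "1m"
  },
  "search_after": [1463538857, "654323"],
  "sort": [
    {"date": "asc"},
    {"tie_breaker_id": "asc"}
  ]
}
----

When pagination is finished, release the resources by closing the PIT (`DELETE /_pit` with the `id` in the request body).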
diff --git a/docs/reference/search/suggesters/completion-suggest.asciidoc b/docs/reference/search/suggesters/completion-suggest.asciidoc index 0ba27e7d90742..2237d209f1381 100644 --- a/docs/reference/search/suggesters/completion-suggest.asciidoc +++ b/docs/reference/search/suggesters/completion-suggest.asciidoc @@ -15,10 +15,18 @@ but are costly to build and are stored in-memory. [[completion-suggester-mapping]] ===== Mapping +include::../../mapping/types/completion.asciidoc[tag=completion-mapping] -To use this feature, specify a special mapping for this field, -which indexes the field values for fast completions. +[[indexing]] +===== Indexing + +You index suggestions like any other field. A suggestion is made of an +`input` and an optional `weight` attribute. An `input` is the expected +text to be matched by a suggestion query and the `weight` determines how +the suggestions will be scored. Indexing a suggestion is as follows: + +//// [source,console] -------------------------------------------------- PUT music @@ -27,53 +35,13 @@ PUT music "properties": { "suggest": { "type": "completion" - }, - "title": { - "type": "keyword" } } } } -------------------------------------------------- // TESTSETUP - -Mapping supports the following parameters: - -[horizontal] -`analyzer`:: - The index analyzer to use, defaults to `simple`. - -`search_analyzer`:: - The search analyzer to use, defaults to value of `analyzer`. - -`preserve_separators`:: - Preserves the separators, defaults to `true`. - If disabled, you could find a field starting with `Foo Fighters`, if you - suggest for `foof`. - -`preserve_position_increments`:: - Enables position increments, defaults to `true`. - If disabled and using stopwords analyzer, you could get a - field starting with `The Beatles`, if you suggest for `b`. *Note*: You - could also achieve this by indexing two inputs, `Beatles` and - `The Beatles`, no need to change a simple analyzer, if you are able to - enrich your data. - -`max_input_length`:: - Limits the length of a single input, defaults to `50` UTF-16 code points. - This limit is only used at index time to reduce the total number of - characters per input string in order to prevent massive inputs from - bloating the underlying datastructure. Most use cases won't be influenced - by the default value since prefix completions seldom grow beyond prefixes longer - than a handful of characters. - -[[indexing]] -===== Indexing - -You index suggestions like any other field. A suggestion is made of an -`input` and an optional `weight` attribute. An `input` is the expected -text to be matched by a suggestion query and the `weight` determines how -the suggestions will be scored. Indexing a suggestion is as follows: +//// [source,console] -------------------------------------------------- diff --git a/docs/reference/search/terms-enum.asciidoc b/docs/reference/search/terms-enum.asciidoc index 1853a0ad77849..68048b372e793 100644 --- a/docs/reference/search/terms-enum.asciidoc +++ b/docs/reference/search/terms-enum.asciidoc @@ -41,6 +41,11 @@ If the `complete` flag is `false`, the returned `terms` set may be incomplete and should be treated as approximate. This can occur due to a few reasons, such as a request timeout or a node error. +NOTE: The terms enum API may return terms from deleted documents. Deleted +documents are initially only marked as deleted. It is not until their segments +are <> that documents are actually deleted. Until +that happens, the terms enum API will return terms from these documents. 
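As a minimal sketch of the force merge mentioned in the note above (index name assumed), an expunge-deletes merge rewrites segments so that documents marked as deleted stop contributing terms:

[source,console]
----
POST /my-index-000001/_forcemerge?only_expunge_deletes=true
----

Force merging is I/O-intensive, so it is usually only worth running against indices that no longer receive writes.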
+ [[search-terms-enum-api-request]] ==== {api-request-title} @@ -104,4 +109,4 @@ query rewrites to `match_none`. (Optional, string) The string after which terms in the index should be returned. Allows for a form of pagination if the last result from one request is passed as the `search_after` -parameter for a subsequent request. +parameter for a subsequent request. \ No newline at end of file diff --git a/docs/reference/settings/ilm-settings.asciidoc b/docs/reference/settings/ilm-settings.asciidoc index 4462ee4719089..4884ec429b22f 100644 --- a/docs/reference/settings/ilm-settings.asciidoc +++ b/docs/reference/settings/ilm-settings.asciidoc @@ -23,7 +23,7 @@ indices. Defaults to `true`. [[indices-lifecycle-poll-interval]] `indices.lifecycle.poll_interval`:: -(<>, <>) +(<>, <>) How often {ilm} checks for indices that meet policy criteria. Defaults to `10m`. ==== Index level settings @@ -31,34 +31,38 @@ These index-level {ilm-init} settings are typically configured through index templates. For more information, see <>. `index.lifecycle.indexing_complete`:: -(<>, Boolean) -Indicates whether or not the index has been rolled over. +(<>, Boolean) +Indicates whether or not the index has been rolled over. Automatically set to `true` when {ilm-init} completes the rollover action. You can explicitly set it to <>. Defaults to `false`. +[[index-lifecycle-name]] `index.lifecycle.name`:: (<>, string) The name of the policy to use to manage the index. For information about how {es} applies policy changes, see <>. +If you are restoring an index from snapshot that was previously managed by {ilm}, +you can override this setting to null during the restore operation to disable +further management of the index. See also <>. [[index-lifecycle-origination-date]] `index.lifecycle.origination_date`:: -(<>, long) -If specified, this is the timestamp used to calculate the index age for its phase transitions. -Use this setting if you create a new index that contains old data and -want to use the original creation date to calculate the index age. +(<>, long) +If specified, this is the timestamp used to calculate the index age for its phase transitions. +Use this setting if you create a new index that contains old data and +want to use the original creation date to calculate the index age. Specified as a Unix epoch value in milliseconds. [[index-lifecycle-parse-origination-date]] `index.lifecycle.parse_origination_date`:: -(<>, Boolean) -Set to `true` to parse the origination date from the index name. -This origination date is used to calculate the index age for its phase transitions. -The index name must match the pattern `^.*-{date_format}-\\d+`, +(<>, Boolean) +Set to `true` to parse the origination date from the index name. +This origination date is used to calculate the index age for its phase transitions. +The index name must match the pattern `^.*-{date_format}-\\d+`, where the `date_format` is `yyyy.MM.dd` and the trailing digits are optional. -An index that was rolled over would normally match the full format, -for example `logs-2016.10.31-000002`). +An index that was rolled over would normally match the full format, +for example `logs-2016.10.31-000002`). If the index name doesn't match the pattern, index creation fails. [[index-lifecycle-step-wait-time-threshold]] @@ -68,9 +72,13 @@ Time to wait for the cluster to resolve allocation issues during an {ilm-init} <> action. Must be greater than `1h` (1 hour). Defaults to `12h` (12 hours). See <>. 
+[[index-lifecycle-rollover-alias]] `index.lifecycle.rollover_alias`:: -(<>, string) +(<>, string) The index alias to update when the index rolls over. Specify when using a policy that contains a rollover action. When the index rolls over, the alias is updated to reflect that the index is no longer the write index. For more information about rolling indices, see <>. +If you are restoring an index from snapshot that was previously managed by {ilm}, +you can override this setting to null during the restore operation to disable +further management of future indices. See also <>. diff --git a/docs/reference/setup.asciidoc b/docs/reference/setup.asciidoc index e1b562eac3bc5..cdf5889fa1477 100644 --- a/docs/reference/setup.asciidoc +++ b/docs/reference/setup.asciidoc @@ -45,6 +45,8 @@ resource-heavy {ls} deployment should be on its own host. include::setup/install.asciidoc[] +include::setup/run-elasticsearch-locally.asciidoc[] + include::setup/configuration.asciidoc[] include::setup/important-settings.asciidoc[] diff --git a/docs/reference/setup/add-nodes.asciidoc b/docs/reference/setup/add-nodes.asciidoc index 11912abeffaba..0be2d8c643a9f 100644 --- a/docs/reference/setup/add-nodes.asciidoc +++ b/docs/reference/setup/add-nodes.asciidoc @@ -25,21 +25,21 @@ green. image::setup/images/elas_0204.png["A cluster with three nodes"] -You can run multiple nodes on your local machine in order to experiment with how -an {es} cluster of multiple nodes behaves. To add a node to a cluster running on -your local machine: - -. Set up a new {es} instance. -. Specify the name of the cluster with the `cluster.name` setting in -`elasticsearch.yml`. For example, to add a node to the `logging-prod` cluster, -add the line `cluster.name: "logging-prod"` to `elasticsearch.yml`. -. Start {es}. The node automatically discovers and joins the specified cluster. - -To add a node to a cluster running on multiple machines, you must also -<> so that the new node can discover +[discrete] +=== Enroll nodes in an existing cluster +You can enroll additional nodes on your local machine to experiment with how an +{es} cluster with multiple nodes behaves. + +[NOTE] +==== +To add a node to a cluster running on multiple machines, you must also set +<> so that the new node can discover the rest of its cluster. +==== + +include::../../../x-pack/docs/en/security/enroll-nodes.asciidoc[] -For more information about discovery and shard allocation, see +For more information about discovery and shard allocation, refer to <> and <>. [discrete] diff --git a/docs/reference/setup/install.asciidoc b/docs/reference/setup/install.asciidoc index 72a10d323a7ca..9ae75ea05ada5 100644 --- a/docs/reference/setup/install.asciidoc +++ b/docs/reference/setup/install.asciidoc @@ -20,6 +20,8 @@ If you want to install and manage {es} yourself, you can: * Run {es} in a <>. * Set up and manage {es}, {kib}, {agent}, and the rest of the Elastic Stack on Kubernetes with {eck-ref}[{eck}]. +TIP: To try out Elasticsearch on your own machine, we recommend using Docker and running both Elasticsearch and Kibana. For more information, see <>. 
+ [discrete] [[elasticsearch-install-packages]] === Elasticsearch install packages diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc index 8e2e9b3e16858..6db93a2b9e601 100644 --- a/docs/reference/setup/install/docker.asciidoc +++ b/docs/reference/setup/install/docker.asciidoc @@ -263,6 +263,10 @@ password for the `elastic` and `kibana_system` users with the `ELASTIC_PASSWORD` and `KIBANA_PASSWORD` variables. These variables are referenced by the `docker-compose.yml` file. +IMPORTANT: Your passwords must be alphanumeric, and cannot contain special +characters such as `!` or `@`. The `bash` script included in the +`docker-compose.yml` file only operates on alphanumeric characters. + ["source","txt",subs="attributes"] ---- include::docker/.env[] diff --git a/docs/reference/setup/install/docker/docker-compose.yml b/docs/reference/setup/install/docker/docker-compose.yml index 90f0ff363bb98..4c6ba48035b4a 100644 --- a/docs/reference/setup/install/docker/docker-compose.yml +++ b/docs/reference/setup/install/docker/docker-compose.yml @@ -53,7 +53,7 @@ services: echo "Waiting for Elasticsearch availability"; until curl -s --cacert config/certs/ca/ca.crt https://es01:9200 | grep -q "missing authentication credentials"; do sleep 30; done; echo "Setting kibana_system password"; - until curl -s -X POST --cacert config/certs/ca/ca.crt -u elastic:${ELASTIC_PASSWORD} -H "Content-Type: application/json" https://es01:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}"; do sleep 10; done; + until curl -s -X POST --cacert config/certs/ca/ca.crt -u "elastic:${ELASTIC_PASSWORD}" -H "Content-Type: application/json" https://es01:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}"; do sleep 10; done; echo "All done!"; ' healthcheck: diff --git a/docs/reference/setup/restart-cluster.asciidoc b/docs/reference/setup/restart-cluster.asciidoc index 6b37eba826f2a..e3090f9ec405a 100644 --- a/docs/reference/setup/restart-cluster.asciidoc +++ b/docs/reference/setup/restart-cluster.asciidoc @@ -8,6 +8,11 @@ nodes in the cluster while in the case of <>, you shut down only one node at a time, so the service remains uninterrupted. +[WARNING] +==== +Nodes exceeding the low watermark threshold will be slow to restart. Reduce the disk +usage below the <> before restarting nodes. +==== [discrete] [[restart-cluster-full]] diff --git a/docs/reference/setup/run-elasticsearch-locally.asciidoc b/docs/reference/setup/run-elasticsearch-locally.asciidoc new file mode 100644 index 0000000000000..66152933b0e20 --- /dev/null +++ b/docs/reference/setup/run-elasticsearch-locally.asciidoc @@ -0,0 +1,187 @@ +[[run-elasticsearch-locally]] +== Run Elasticsearch locally + +//// +IMPORTANT: This content is replicated in the Elasticsearch repo +README.asciidoc file. If you make changes, you must also update the +Elasticsearch README. ++ +GitHub renders the tagged region directives when you view the README, +so it's not possible to just include the content from the README. Darn. ++ +Also note that there are similar instructions in the Kibana guide: +https://www.elastic.co/guide/en/kibana/current/docker.html +//// + +To try out Elasticsearch on your own machine, we recommend using Docker +and running both Elasticsearch and Kibana. +Docker images are available from the https://www.docker.elastic.co[Elastic Docker registry]. + +NOTE: Starting in Elasticsearch 8.0, security is enabled by default.
+The first time you start Elasticsearch, TLS encryption is configured automatically, +a password is generated for the `elastic` user, +and a Kibana enrollment token is created so you can connect Kibana to your secured cluster. + +For other installation options, see the +https://www.elastic.co/guide/en/elasticsearch/reference/current/install-elasticsearch.html[Elasticsearch installation documentation]. + +[discrete] +=== Start Elasticsearch + +. Install and start https://www.docker.com/products/docker-desktop[Docker +Desktop]. Go to **Preferences > Resources > Advanced** and set Memory to at least 4GB. + +. Start an Elasticsearch container: +ifeval::["{release-state}"=="unreleased"] ++ +NOTE: Version {version} of {es} has not yet been released, so no +Docker image is currently available for this version. +endif::[] +ifeval::["{release-state}"!="unreleased"] ++ +[source,sh,subs="attributes"] +---- +docker network create elastic +docker pull docker.elastic.co/elasticsearch/elasticsearch:{version} +docker run --name elasticsearch --net elastic -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" -t docker.elastic.co/elasticsearch/elasticsearch:{version} +---- +endif::[] ++ +When you start Elasticsearch for the first time, the generated `elastic` user password and +Kibana enrollment token are output to the terminal. ++ +NOTE: You might need to scroll back a bit in the terminal to view the password +and enrollment token. + +. Copy the generated password and enrollment token and save them in a secure +location. These values are shown only when you start Elasticsearch for the first time. +You'll use these to enroll Kibana with your Elasticsearch cluster and log in. + +[discrete] +=== Start Kibana + +Kibana enables you to easily send requests to Elasticsearch and analyze, visualize, and manage data interactively. + +. In a new terminal session, start Kibana and connect it to your Elasticsearch container: +ifeval::["{release-state}"=="unreleased"] ++ +NOTE: Version {version} of {kib} has not yet been released, so no +Docker image is currently available for this version. +endif::[] +ifeval::["{release-state}"!="unreleased"] ++ +[source,sh,subs="attributes"] +---- +docker pull docker.elastic.co/kibana/kibana:{version} +docker run --name kibana --net elastic -p 5601:5601 docker.elastic.co/kibana/kibana:{version} +---- +endif::[] ++ +When you start Kibana, a unique URL is output to your terminal. + +. To access Kibana, open the generated URL in your browser. + + .. Paste the enrollment token that you copied when starting + Elasticsearch and click the button to connect your Kibana instance with Elasticsearch. + + .. Log in to Kibana as the `elastic` user with the password that was generated + when you started Elasticsearch. + +[discrete] +=== Send requests to Elasticsearch + +You send data and other requests to Elasticsearch through REST APIs. +You can interact with Elasticsearch using any client that sends HTTP requests, +such as the https://www.elastic.co/guide/en/elasticsearch/client/index.html[Elasticsearch +language clients] and https://curl.se[curl]. +Kibana's developer console provides an easy way to experiment and test requests. +To access the console, go to **Management > Dev Tools**. + +[discrete] +=== Add data + +You index data into Elasticsearch by sending JSON objects (documents) through the REST APIs. +Whether you have structured or unstructured text, numerical data, or geospatial data, +Elasticsearch efficiently stores and indexes it in a way that supports fast searches. 
+ +For timestamped data such as logs and metrics, you typically add documents to a +data stream made up of multiple auto-generated backing indices. + +To add a single document to an index, submit an HTTP post request that targets the index. + +[source,console] +---- +POST /customer/_doc/1 +{ + "firstname": "Jennifer", + "lastname": "Walters" +} +---- + +This request automatically creates the `customer` index if it doesn't exist, +adds a new document that has an ID of 1, and +stores and indexes the `firstname` and `lastname` fields. + +The new document is available immediately from any node in the cluster. +You can retrieve it with a GET request that specifies its document ID: + +[source,console] +---- +GET /customer/_doc/1 +---- +// TEST[continued] + +To add multiple documents in one request, use the `_bulk` API. +Bulk data must be newline-delimited JSON (NDJSON). +Each line must end in a newline character (`\n`), including the last line. + +[source,console] +---- +PUT customer/_bulk +{ "create": { } } +{ "firstname": "Monica","lastname":"Rambeau"} +{ "create": { } } +{ "firstname": "Carol","lastname":"Danvers"} +{ "create": { } } +{ "firstname": "Wanda","lastname":"Maximoff"} +{ "create": { } } +{ "firstname": "Jennifer","lastname":"Takeda"} +---- +// TEST[continued] + +[discrete] +=== Search + +Indexed documents are available for search in near real-time. +The following search matches all customers with a first name of _Jennifer_ +in the `customer` index. + +[source,console] +---- +GET customer/_search +{ + "query" : { + "match" : { "firstname": "Jennifer" } + } +} +---- +// TEST[continued] + +[discrete] +=== Explore + +You can use Discover in Kibana to interactively search and filter your data. +From there, you can start creating visualizations and building and sharing dashboards. + +To get started, create a _data view_ that connects to one or more Elasticsearch indices, +data streams, or index aliases. + +. Go to **Management > Stack Management > Kibana > Data Views**. +. Select **Create data view**. +. Enter a name for the data view and a pattern that matches one or more indices, +such as _customer_. +. Select **Save data view to Kibana**. + +To start exploring, go to **Analytics > Discover**. + + diff --git a/docs/reference/snapshot-restore/apis/delete-snapshot-api.asciidoc b/docs/reference/snapshot-restore/apis/delete-snapshot-api.asciidoc index a060e7f2d59df..5bc46f54ec137 100644 --- a/docs/reference/snapshot-restore/apis/delete-snapshot-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/delete-snapshot-api.asciidoc @@ -53,6 +53,11 @@ Name of the repository to delete a snapshot from. (Required, string) Comma-separated list of snapshot names to delete. Also accepts wildcards (`*`). +[[delete-snapshot-api-query-params]] +==== {api-query-parms-title} + +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=master-timeout] + [[delete-snapshot-api-example]] ==== {api-example-title} diff --git a/docs/reference/snapshot-restore/apis/get-snapshot-status-api.asciidoc b/docs/reference/snapshot-restore/apis/get-snapshot-status-api.asciidoc index 94a69069145a6..150f4dfff48ab 100644 --- a/docs/reference/snapshot-restore/apis/get-snapshot-status-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/get-snapshot-status-api.asciidoc @@ -169,7 +169,7 @@ Indicates the current snapshot state. `FAILED`:: The snapshot finished with an error and failed to store any data. -`IN_PROGRESS`:: +`STARTED`:: The snapshot is currently running. 
`PARTIAL`:: @@ -200,7 +200,7 @@ Number of shards that are still initializing. `started`:: (integer) -Number of shards that have started but not are not finalized. +Number of shards that have started but are not finalized. `finalizing`:: (integer) diff --git a/docs/reference/transform/apis/get-transform.asciidoc b/docs/reference/transform/apis/get-transform.asciidoc index a4604157e96d3..b819877017b70 100644 --- a/docs/reference/transform/apis/get-transform.asciidoc +++ b/docs/reference/transform/apis/get-transform.asciidoc @@ -49,6 +49,12 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=transform-id-wildcard] (Optional, Boolean) include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=allow-no-match-transforms1] +`exclude_generated`:: +(Optional, Boolean) +Excludes fields that were automatically added when creating the transform. +This allows the configuration to be in an acceptable format to be retrieved +and then added to another cluster. Default is false. + `from`:: (Optional, integer) include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=from-transforms] @@ -57,11 +63,7 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=from-transforms] (Optional, integer) include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=size-transforms] -`exclude_generated`:: -(Optional, Boolean) -Excludes fields that were automatically added when creating the transform. -This allows the configuration to be in an acceptable format to be retrieved -and then added to another cluster. Default is false. + [[get-transform-response]] == {api-response-body-title} diff --git a/docs/reference/transform/apis/preview-transform.asciidoc b/docs/reference/transform/apis/preview-transform.asciidoc index 34295a58a9ca1..9093b2d2b7e47 100644 --- a/docs/reference/transform/apis/preview-transform.asciidoc +++ b/docs/reference/transform/apis/preview-transform.asciidoc @@ -138,7 +138,6 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=pivot] .Properties of `pivot` [%collapsible%open] ==== - `aggregations` or `aggs`::: (Required, object) include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=pivot-aggs] @@ -183,7 +182,6 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=source-transforms] .Properties of `source` [%collapsible%open] ==== - `index`::: (Required, string or array) include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=source-index-transforms] @@ -215,7 +213,6 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=sync-time] .Properties of `time` [%collapsible%open] ===== - `delay`:::: (Optional, <>) include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=sync-time-delay] @@ -236,18 +233,18 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=transform-settings] .Properties of `settings` [%collapsible%open] ==== -`dates_as_epoch_millis`::: -(Optional, boolean) -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=transform-settings-dates-as-epoch-milli] -`docs_per_second`::: -(Optional, float) -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=transform-settings-docs-per-second] `align_checkpoints`::: (Optional, boolean) include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=transform-settings-align-checkpoints] +`dates_as_epoch_millis`::: +(Optional, boolean) +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=transform-settings-dates-as-epoch-milli] `deduce_mappings`::: (Optional, boolean) include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=transform-settings-deduce-mappings] +`docs_per_second`::: +(Optional, float) 
+include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=transform-settings-docs-per-second] `max_page_search_size`::: (Optional, integer) include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=transform-settings-max-page-search-size] @@ -258,10 +255,6 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=transform-settings-max [[preview-transform-response]] == {api-response-body-title} -`preview`:: -(array) An array of documents. In particular, they are the JSON representation -of the documents that would be created in the destination index by the -{transform}. //Begin generated_dest_index `generated_dest_index`:: @@ -282,6 +275,12 @@ of the documents that would be created in the destination index by the ==== //End generated_dest_index +`preview`:: +(array) An array of documents. In particular, they are the JSON representation +of the documents that would be created in the destination index by the +{transform}. + + == {api-examples-title} [source,console] @@ -295,7 +294,8 @@ POST _transform/_preview "group_by": { "customer_id": { "terms": { - "field": "customer_id" + "field": "customer_id", + "missing_bucket": true } } }, diff --git a/docs/reference/transform/apis/put-transform.asciidoc b/docs/reference/transform/apis/put-transform.asciidoc index 52ba7597d4ea0..40e79a7526766 100644 --- a/docs/reference/transform/apis/put-transform.asciidoc +++ b/docs/reference/transform/apis/put-transform.asciidoc @@ -90,6 +90,7 @@ behavior may be desired if the source index does not exist until after the Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. Defaults to `30s`. + [role="child_attributes"] [[put-transform-request-body]] == {api-request-body-title} @@ -162,7 +163,6 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=pivot-aggs] `group_by`::: (Required, object) include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=pivot-group-by] - ==== //End pivot @@ -200,18 +200,18 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=transform-settings] .Properties of `settings` [%collapsible%open] ==== -`dates_as_epoch_millis`::: -(Optional, boolean) -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=transform-settings-dates-as-epoch-milli] -`docs_per_second`::: -(Optional, float) -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=transform-settings-docs-per-second] `align_checkpoints`::: (Optional, boolean) include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=transform-settings-align-checkpoints] +`dates_as_epoch_millis`::: +(Optional, boolean) +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=transform-settings-dates-as-epoch-milli] `deduce_mappings`::: (Optional, boolean) include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=transform-settings-deduce-mappings] +`docs_per_second`::: +(Optional, float) +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=transform-settings-docs-per-second] `max_page_search_size`::: (Optional, integer) include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=transform-settings-max-page-search-size] @@ -261,7 +261,6 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=sync-time] .Properties of `time` [%collapsible%open] ===== - `delay`:::: (Optional, <>) include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=sync-time-delay] @@ -306,7 +305,8 @@ PUT _transform/ecommerce_transform1 "group_by": { "customer_id": { "terms": { - "field": "customer_id" + "field": "customer_id", + "missing_bucket": true } } }, diff --git 
a/docs/reference/transform/apis/stop-transform.asciidoc b/docs/reference/transform/apis/stop-transform.asciidoc index f36a2842478df..a4b633994b3eb 100644 --- a/docs/reference/transform/apis/stop-transform.asciidoc +++ b/docs/reference/transform/apis/stop-transform.asciidoc @@ -40,8 +40,8 @@ comma-separated list or a wildcard expression. To stop all {transforms}, use == {api-query-parms-title} `allow_no_match`:: -(Optional, Boolean) -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=allow-no-match-transforms2] + (Optional, Boolean) + include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=allow-no-match-transforms2] `force`:: (Optional, Boolean) Set to `true` to stop a failed {transform} or to diff --git a/docs/reference/transform/apis/update-transform.asciidoc b/docs/reference/transform/apis/update-transform.asciidoc index e44cb0a037888..31c31be50784d 100644 --- a/docs/reference/transform/apis/update-transform.asciidoc +++ b/docs/reference/transform/apis/update-transform.asciidoc @@ -142,18 +142,18 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=transform-settings] .Properties of `settings` [%collapsible%open] ==== -`dates_as_epoch_millis`::: -(Optional, boolean) -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=transform-settings-dates-as-epoch-milli] -`docs_per_second`::: -(Optional, float) -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=transform-settings-docs-per-second] `align_checkpoints`::: (Optional, boolean) include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=transform-settings-align-checkpoints] +`dates_as_epoch_millis`::: +(Optional, boolean) +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=transform-settings-dates-as-epoch-milli] `deduce_mappings`::: (Optional, boolean) include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=transform-settings-deduce-mappings] +`docs_per_second`::: +(Optional, float) +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=transform-settings-docs-per-second] `max_page_search_size`::: (Optional, integer) include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=transform-settings-max-page-search-size] diff --git a/docs/reference/troubleshooting.asciidoc b/docs/reference/troubleshooting.asciidoc index a32c1484177c0..38967fc5f3e70 100644 --- a/docs/reference/troubleshooting.asciidoc +++ b/docs/reference/troubleshooting.asciidoc @@ -6,9 +6,46 @@ This section provides a series of troubleshooting solutions aimed at helping users fix problems that an {es} deployment might encounter. -Several troubleshooting issues can be diagnosed using the +[discrete] +[[troubleshooting-general]] +=== General +* <> +* Several troubleshooting issues can be diagnosed using the <>. +[discrete] +[[troubleshooting-data]] +=== Data +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> + +[discrete] +[[troubleshooting-management]] +=== Management +* <> +* <> + +[discrete] +[[troubleshooting-snapshot]] +=== Snapshot and restore +* <> +* <> + +[discrete] +[[troubleshooting-others]] +=== Others +* <> +* <> +* <> +* <> +* <> + If none of these solutions relate to your issue, you can still get help: * For users with an active subscription, you can get help in several ways: @@ -36,23 +73,21 @@ the experts in the community, including people from Elastic. 
include::troubleshooting/fix-common-cluster-issues.asciidoc[] -include::troubleshooting/data/increase-shard-limit.asciidoc[] +include::troubleshooting/data/add-tier.asciidoc[] -include::troubleshooting/data/increase-cluster-shard-limit.asciidoc[] +include::troubleshooting/data/enable-cluster-allocation.asciidoc[] include::troubleshooting/data/enable-index-allocation.asciidoc[] -include::troubleshooting/data/enable-cluster-allocation.asciidoc[] - include::troubleshooting/data/data-tiers-mixed-with-node-attr.asciidoc[] -include::troubleshooting/data/add-tier.asciidoc[] +include::troubleshooting/data/increase-tier-capacity.asciidoc[] -include::troubleshooting/data/diagnose-unassigned-shards.asciidoc[] +include::troubleshooting/data/increase-shard-limit.asciidoc[] -include::troubleshooting/discovery-issues.asciidoc[] +include::troubleshooting/data/increase-cluster-shard-limit.asciidoc[] -include::troubleshooting/data/increase-tier-capacity.asciidoc[] +include::troubleshooting/corruption-issues.asciidoc[] include::troubleshooting/data/start-ilm.asciidoc[] @@ -62,8 +97,12 @@ include::troubleshooting/data/restore-from-snapshot.asciidoc[] include::troubleshooting/snapshot/add-repository.asciidoc[] +include::troubleshooting/discovery-issues.asciidoc[] + include::monitoring/troubleshooting.asciidoc[] include::transform/troubleshooting.asciidoc[leveloffset=+1] include::../../x-pack/docs/en/watcher/troubleshooting.asciidoc[] + +include::troubleshooting/troubleshooting-searches.asciidoc[] \ No newline at end of file diff --git a/docs/reference/troubleshooting/data/diagnose-unassigned-shards.asciidoc b/docs/reference/troubleshooting/common-issues/diagnose-unassigned-shards.asciidoc similarity index 100% rename from docs/reference/troubleshooting/data/diagnose-unassigned-shards.asciidoc rename to docs/reference/troubleshooting/common-issues/diagnose-unassigned-shards.asciidoc diff --git a/docs/reference/troubleshooting/corruption-issues.asciidoc b/docs/reference/troubleshooting/corruption-issues.asciidoc new file mode 100644 index 0000000000000..914045a69a8ea --- /dev/null +++ b/docs/reference/troubleshooting/corruption-issues.asciidoc @@ -0,0 +1,92 @@ +[[corruption-troubleshooting]] +== Troubleshooting corruption + +{es} expects that the data it reads from disk is exactly the data it previously +wrote. If it detects that the data on disk is different from what it wrote then +it will report some kind of exception such as: + +- `org.apache.lucene.index.CorruptIndexException` +- `org.elasticsearch.gateway.CorruptStateException` +- `org.elasticsearch.index.translog.TranslogCorruptedException` + +Typically these exceptions happen due to a checksum mismatch. Most of the data +that {es} writes to disk is followed by a checksum using a simple algorithm +known as CRC32 which is fast to compute and good at detecting the kinds of +random corruption that may happen when using faulty storage. A CRC32 checksum +mismatch definitely indicates that something is faulty, although of course a +matching checksum doesn't prove the absence of corruption. + +Verifying a checksum is expensive since it involves reading every byte of the +file which takes significant effort and might evict more useful data from the +filesystem cache, so systems typically don't verify the checksum on a file very +often. This is why you tend only to encounter a corruption exception when +something unusual is happening. For instance, corruptions are often detected +during merges, shard movements, and snapshots. 
This does not mean that these +processes are causing corruption: they are examples of the rare times where +reading a whole file is necessary. {es} takes the opportunity to verify the +checksum at the same time, and this is when the corruption is detected and +reported. It doesn't indicate the cause of the corruption or when it happened. +Corruptions can remain undetected for many months. + +The files that make up a Lucene index are written sequentially from start to +end and then never modified or overwritten. This access pattern means the +checksum computation is very simple and can happen on-the-fly as the file is +initially written, and also makes it very unlikely that an incorrect checksum +is due to a userspace bug at the time the file was written. The routine that +computes the checksum is straightforward, widely used, and very well-tested, so +you can be very confident that a checksum mismatch really does indicate that +the data read from disk is different from the data that {es} previously wrote. + +The files that make up a Lucene index are written in full before they are used. +If a file is needed to recover an index after a restart then your storage +system previously confirmed to {es} that this file was durably synced to disk. +On Linux this means that the `fsync()` system call returned successfully. {es} +sometimes detects that an index is corrupt because a file needed for recovery +has been truncated or is missing its footer. This indicates that your storage +system acknowledges durable writes incorrectly. + +There are many possible explanations for {es} detecting corruption in your +cluster. Databases like {es} generate a challenging I/O workload that may find +subtle infrastructural problems which other tests may miss. {es} is known to +expose the following problems as file corruptions: + +- Filesystem bugs, especially in newer and nonstandard filesystems which might + not have seen enough real-world production usage to be confident that they +work correctly. + +- https://www.elastic.co/blog/canonical-elastic-and-google-team-up-to-prevent-data-corruption-in-linux[Kernel bugs]. + +- Bugs in firmware running on the drive or RAID controller. + +- Incorrect configuration, for instance configuring `fsync()` to report success + before all durable writes have completed. + +- Faulty hardware, which may include the drive itself, the RAID controller, + your RAM or CPU. + +Data corruption typically doesn't result in other evidence of problems apart +from the checksum mismatch. Do not interpret this as an indication that your +storage subsystem is working correctly and therefore that {es} itself caused +the corruption. It is rare for faulty storage to show any evidence of problems +apart from the data corruption, but data corruption itself is a very strong +indicator that your storage subsystem is not working correctly. + +To rule out {es} as the source of data corruption, generate an I/O workload +using something other than {es} and look for data integrity errors. On Linux +the `fio` and `stress-ng` tools can both generate challenging I/O workloads and +verify the integrity of the data they write. Use version 0.12.01 or newer of +`stress-ng` since earlier versions do not have strong enough integrity checks. +You can check that durable writes persist across power outages using a script +such as https://gist.github.com/bradfitz/3172656[`diskchecker.pl`]. 
+ +To narrow down the source of the corruptions, systematically change components +in your cluster's environment until the corruptions stop. The details will +depend on the exact configuration of your hardware, but may include the +following: + +- Try a different filesystem or a different kernel. + +- Try changing each hardware component in turn, ideally changing to a different + model or manufacturer. + +- Try different firmware versions for each hardware component. diff --git a/docs/reference/troubleshooting/fix-common-cluster-issues.asciidoc b/docs/reference/troubleshooting/fix-common-cluster-issues.asciidoc index 7433e25a43947..15876012376c2 100644 --- a/docs/reference/troubleshooting/fix-common-cluster-issues.asciidoc +++ b/docs/reference/troubleshooting/fix-common-cluster-issues.asciidoc @@ -32,10 +32,19 @@ When {es} rejects a request, it stops the operation and returns an error with a A backlogged task queue can prevent tasks from completing and put the cluster into an unhealthy state. +<>:: +There are multiple reasons why shards might get unassigned, ranging from +misconfigured allocation settings to lack of disk space. + +<>:: +A cluster in which nodes leave unexpectedly is unstable and can create several +issues. + include::common-issues/disk-usage-exceeded.asciidoc[] include::common-issues/circuit-breaker-errors.asciidoc[] include::common-issues/high-cpu-usage.asciidoc[] include::common-issues/high-jvm-memory-pressure.asciidoc[] include::common-issues/red-yellow-cluster-status.asciidoc[] include::common-issues/rejected-requests.asciidoc[] -include::common-issues/task-queue-backlog.asciidoc[] \ No newline at end of file +include::common-issues/task-queue-backlog.asciidoc[] +include::common-issues/diagnose-unassigned-shards.asciidoc[] \ No newline at end of file diff --git a/docs/reference/troubleshooting/troubleshooting-searches.asciidoc b/docs/reference/troubleshooting/troubleshooting-searches.asciidoc new file mode 100644 index 0000000000000..3aca266717162 --- /dev/null +++ b/docs/reference/troubleshooting/troubleshooting-searches.asciidoc @@ -0,0 +1,300 @@ + +[[troubleshooting-searches]] +== Troubleshooting searches + +When you query your data, Elasticsearch may return an error, no search results, +or results in an unexpected order. This guide describes how to troubleshoot +searches. + +[discrete] +[[troubleshooting-searches-exists]] +=== Ensure the data stream, index, or alias exists + +Elasticsearch returns an `index_not_found_exception` when the data stream, index +or alias you try to query does not exist. This can happen when you misspell the +name or when the data has been indexed to a different data stream or index. + +Use the <> to check whether a data stream, index, or +alias exists: + +[source,console] +---- +HEAD my-data-stream +---- + +Use the <> to list all data +streams: + +[source,console] +---- +GET /_data_stream/_stats?human=true +---- + +Use the <> to list all indices and their +aliases: + +[source,console] +---- +GET _all?filter_path=*.aliases +---- + +Instead of an error, it is possible to retrieve partial search results if some +of the indices you're querying are unavailable. Set `ignore_unavailable` to +`true`: + +[source,console] +---- +GET /my-alias/_search?ignore_unavailable=true +---- + +[discrete] +[[troubleshooting-searches-data]] +=== Ensure the data stream or index contains data + +When a search request returns no hits, the data stream or index may contain no +data. This can happen when there is a data ingestion issue. 
For example, the +data may have been indexed to a data stream or index with another name. + +Use the <> to retrieve the number of documents in a data +stream or index. Check that `count` in the response is not 0. + +//// +[source,console] +---- +PUT my-index-000001 +{ + "mappings": { + "properties": { + "my-field": { + "type": "keyword" + }, + "my-num-field": { + "type": "integer" + } + } + } +} +---- +//// + +[source,console] +---- +GET /my-index-000001/_count +---- +//TEST[continued] + +NOTE: When getting no search results in {kib}, check that you have selected the +correct data view and a valid time range. Also, ensure the data view has been +configured with the correct time field. + +[discrete] +[[troubleshooting-searches-field-exists-caps]] +=== Check that the field exists and its capabilities + +Querying a field that does not exist will not return any results. Use the +<> to check whether a field exists: + +[source,console] +---- +GET /my-index-000001/_field_caps?fields=my-field +---- +//TEST[continued] + +If the field does not exist, check the data ingestion process. The field may +have a different name. + +If the field exists, the request will return the field's type and whether it is +searchable and aggregatable. + +[source,console-response] +---- +{ + "indices": [ + "my-index-000001" + ], + "fields": { + "my-field": { + "keyword": { + "type": "keyword", <1> + "metadata_field": false, + "searchable": true, <2> + "aggregatable": true <3> + } + } + } +} +---- + +<1> The field is of type `keyword` in this index. +<2> The field is searchable in this index. +<3> The field is aggregatable in this index. + +[discrete] +[[troubleshooting-searches-mappings]] +=== Check the field's mappings + +A field's capabilities are determined by its <>. To retrieve +the mapping, use the <>: + +[source,console] +---- +GET /my-index-000001/_mappings +---- +//TEST[continued] + +If you query a `text` field, pay attention to the analyzer that may have been +configured. You can use the <> to check how a +field's analyzer processes values and query terms: + +[source,console] +---- +GET /my-index-000001/_analyze +{ + "field" : "my-field", + "text" : "this is a test" +} +---- +//TEST[continued] + +To change the mapping of an existing field, refer to +<>. + +[discrete] +[[troubleshooting-check-field-values]] +=== Check the field's values + +Use the <> to check whether there are +documents that return a value for a field. Check that `count` in the response is +not 0. + +[source,console] +---- +GET /my-index-000001/_count +{ + "query": { + "exists": { + "field": "my-field" + } + } +} +---- +//TEST[continued] + +If the field is aggregatable, you can use <> +to check the field's values. For `keyword` fields, you can use a +<> to retrieve +the field's most common values: + +[source,console] +---- +GET /my-index-000001/_search?filter_path=aggregations +{ + "size": 0, + "aggs": { + "top_values": { + "terms": { + "field": "my-field", + "size": 10 + } + } + } +} +---- +//TEST[continued] + +For numeric fields, you can use the +<> to get an +idea of the field's value distribution: + +[source,console] +---- +GET my-index-000001/_search?filter_path=aggregations +{ + "aggs": { + "my-num-field-stats": { + "stats": { + "field": "my-num-field" + } + } + } +} +---- +//TEST[continued] + +If the field does not return any values, check the data ingestion process. The +field may have a different name. 
+ +[discrete] +[[troubleshooting-searches-validate-explain-profile]] +=== Validate, explain, and profile queries + +When a query returns unexpected results, Elasticsearch offers several tools to +investigate why. + +The <> enables you to validate a query. Use the +`rewrite` parameter to return the Lucene query an Elasticsearch query is +rewritten into: + +[source,console] +-------------------------------------------------- +GET /my-index-000001/_validate/query?rewrite=true +{ + "query": { + "match": { + "user.id": { + "query": "kimchy", + "fuzziness": "auto" + } + } + } +} +-------------------------------------------------- +//TEST[continued] + +Use the <> to find out why a specific document +matches or doesn’t match a query: + +[source,console] +-------------------------------------------------- +GET /my-index-000001/_explain/0 +{ + "query" : { + "match" : { "message" : "elasticsearch" } + } +} +-------------------------------------------------- +// TEST[setup:messages] + +The <> provides detailed timing information about a +search request. For a visual representation of the results, use the +{kibana-ref}/xpack-profiler.html[Search Profiler] in {kib}. + +NOTE: To troubleshoot queries in {kib}, select **Inspect** in the toolbar. Next, +select **Request**. You can now copy the query {kib} sent to {es} for +further analysis in Console. + +[discrete] +[[troubleshooting-searches-settings]] +=== Check index settings + +<> can influence search results. For +example, the `index.query.default_field` setting, which determines the field +that is queried when a query specifies no explicit field. Use the +<> to retrieve the settings for an +index: + +[source,console] +---- +GET /my-index-000001/_settings +---- +//TEST[continued] + +You can update dynamic index settings with the <>. <> requires changing the index template +used by the data stream. + +For static settings, you need to create a new index with the correct settings. +Next, you can reindex the data into that index. For data streams, refer to +<>. 
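As a minimal sketch of updating a dynamic setting, using the `index.query.default_field` setting mentioned above and a hypothetical `my-field` field on the example index:

[source,console]
----
PUT /my-index-000001/_settings
{
  "index": {
    "query": {
      "default_field": "my-field"
    }
  }
}
----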
\ No newline at end of file diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index b871071c412e2..e939ec976751d 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -1,6 +1,6 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-7.5-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-7.5.1-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=97a52d145762adc241bad7fd18289bf7f6801e08ece6badf80402fe2b9f250b1 +distributionSha256Sum=db9c8211ed63f61f60292c69e80d89196f9eb36665e369e7f00ac4cc841c2219 diff --git a/modules/lang-expression/licenses/lucene-codecs-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/modules/lang-expression/licenses/lucene-codecs-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 47b8df46111d4..0000000000000 --- a/modules/lang-expression/licenses/lucene-codecs-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f9d91e4de3468b4c513a82a3d20d9d19137c4311 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-codecs-9.3.0.jar.sha1 b/modules/lang-expression/licenses/lucene-codecs-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..11661ba525168 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-codecs-9.3.0.jar.sha1 @@ -0,0 +1 @@ +da4e2de2008a0e8c33da7177b85225604cb5200e \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 544a44a26debb..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b146dc1d898b3f638328a4d6a64f68cfede251ec \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.3.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..2d216277b3a8e --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.3.0.jar.sha1 @@ -0,0 +1 @@ +5583bcd3a24d3aae40b0a3152458021844ac09aa \ No newline at end of file diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.ingest.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.ingest.txt index 4924f93e87c91..a684d8868e136 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.ingest.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.ingest.txt @@ -35,7 +35,7 @@ class org.elasticsearch.script.Metadata { String getVersionType() void setVersionType(String) - ZonedDateTime getTimestamp() + ZonedDateTime getNow() } class org.elasticsearch.script.IngestScript { diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.update.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.update.txt index ee2846331ee0f..f563fca8acad6 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.update.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.update.txt @@ -26,7 +26,7 @@ class 
org.elasticsearch.script.Metadata { long getVersion() String getOp() void setOp(String) - ZonedDateTime getTimestamp() + ZonedDateTime getNow() } class org.elasticsearch.script.UpdateScript { diff --git a/modules/legacy-geo/licenses/lucene-spatial-extras-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/modules/legacy-geo/licenses/lucene-spatial-extras-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 5e9c50d838196..0000000000000 --- a/modules/legacy-geo/licenses/lucene-spatial-extras-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f87c4435a856c612a5799fa89397364a7b2d6f7e \ No newline at end of file diff --git a/modules/legacy-geo/licenses/lucene-spatial-extras-9.3.0.jar.sha1 b/modules/legacy-geo/licenses/lucene-spatial-extras-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..8bbc5359487ff --- /dev/null +++ b/modules/legacy-geo/licenses/lucene-spatial-extras-9.3.0.jar.sha1 @@ -0,0 +1 @@ +c9b226b49ae987a4226791f023562187583eb9ad \ No newline at end of file diff --git a/modules/legacy-geo/licenses/lucene-spatial3d-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/modules/legacy-geo/licenses/lucene-spatial3d-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 6c74a9716f82f..0000000000000 --- a/modules/legacy-geo/licenses/lucene-spatial3d-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -110e8b2e5bced4b8f482ac58a2cf9cd64591b028 \ No newline at end of file diff --git a/modules/legacy-geo/licenses/lucene-spatial3d-9.3.0.jar.sha1 b/modules/legacy-geo/licenses/lucene-spatial3d-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..31132ef0ad6df --- /dev/null +++ b/modules/legacy-geo/licenses/lucene-spatial3d-9.3.0.jar.sha1 @@ -0,0 +1 @@ +201aa61856ae44fa494504591aed54fd9b75af16 \ No newline at end of file diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java index 8e6c33e947c13..2788bac6600b9 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java @@ -39,6 +39,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.time.Clock; +import java.util.List; import java.util.Map; import java.util.Objects; @@ -223,7 +224,12 @@ static AWSCredentialsProvider buildCredentials( if (webIdentityTokenCredentialsProvider.isActive()) { logger.debug("Using a custom provider chain of Web Identity Token and instance profile credentials"); return new PrivilegedAWSCredentialsProvider( - new AWSCredentialsProviderChain(webIdentityTokenCredentialsProvider, new EC2ContainerCredentialsProviderWrapper()) + new AWSCredentialsProviderChain( + List.of( + new ErrorLoggingCredentialsProvider(webIdentityTokenCredentialsProvider, LOGGER), + new ErrorLoggingCredentialsProvider(new EC2ContainerCredentialsProviderWrapper(), LOGGER) + ) + ) ); } else { logger.debug("Using instance profile credentials"); @@ -375,6 +381,37 @@ public void shutdown() throws IOException { } } + static class ErrorLoggingCredentialsProvider implements AWSCredentialsProvider { + + private final AWSCredentialsProvider delegate; + private final Logger logger; + + ErrorLoggingCredentialsProvider(AWSCredentialsProvider delegate, Logger logger) { + this.delegate = Objects.requireNonNull(delegate); + this.logger = Objects.requireNonNull(logger); + } + + @Override + public AWSCredentials getCredentials() { + try { + return 
delegate.getCredentials(); + } catch (Exception e) { + logger.error(() -> "Unable to load credentials from " + delegate, e); + throw e; + } + } + + @Override + public void refresh() { + try { + delegate.refresh(); + } catch (Exception e) { + logger.error(() -> "Unable to refresh " + delegate, e); + throw e; + } + } + } + @FunctionalInterface interface SystemEnvironment { String getEnv(String name); diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java index 0907c73e5b17a..ac97346f717a7 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java @@ -17,17 +17,22 @@ import com.amazonaws.auth.BasicAWSCredentials; import com.amazonaws.auth.EC2ContainerCredentialsProviderWrapper; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; +import org.mockito.ArgumentCaptor; import org.mockito.Mockito; import java.util.Locale; import java.util.Map; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.startsWith; public class AwsS3ServiceImplTests extends ESTestCase { @@ -229,4 +234,40 @@ private void assertEndpoint(Settings repositorySettings, Settings settings, Stri assertThat(clientSettings.endpoint, is(expectedEndpoint)); } + public void testLoggingCredentialsProviderCatchesErrors() { + var mockProvider = Mockito.mock(AWSCredentialsProvider.class); + String mockProviderErrorMessage = "mockProvider failed to generate credentials"; + Mockito.when(mockProvider.getCredentials()).thenThrow(new IllegalStateException(mockProviderErrorMessage)); + var mockLogger = Mockito.mock(Logger.class); + + var credentialsProvider = new S3Service.ErrorLoggingCredentialsProvider(mockProvider, mockLogger); + var exception = expectThrows(IllegalStateException.class, credentialsProvider::getCredentials); + assertEquals(mockProviderErrorMessage, exception.getMessage()); + + var messageSupplierCaptor = ArgumentCaptor.forClass(Supplier.class); + var throwableCaptor = ArgumentCaptor.forClass(Throwable.class); + Mockito.verify(mockLogger).error(messageSupplierCaptor.capture(), throwableCaptor.capture()); + + assertThat(messageSupplierCaptor.getValue().get().toString(), startsWith("Unable to load credentials from")); + assertThat(throwableCaptor.getValue().getMessage(), equalTo(mockProviderErrorMessage)); + } + + public void testLoggingCredentialsProviderCatchesErrorsOnRefresh() { + var mockProvider = Mockito.mock(AWSCredentialsProvider.class); + String mockProviderErrorMessage = "mockProvider failed to refresh"; + Mockito.doThrow(new IllegalStateException(mockProviderErrorMessage)).when(mockProvider).refresh(); + var mockLogger = Mockito.mock(Logger.class); + + var credentialsProvider = new S3Service.ErrorLoggingCredentialsProvider(mockProvider, mockLogger); + var exception = expectThrows(IllegalStateException.class, credentialsProvider::refresh); + assertEquals(mockProviderErrorMessage, exception.getMessage()); + + var messageSupplierCaptor = 
ArgumentCaptor.forClass(Supplier.class); + var throwableCaptor = ArgumentCaptor.forClass(Throwable.class); + Mockito.verify(mockLogger).error(messageSupplierCaptor.capture(), throwableCaptor.capture()); + + assertThat(messageSupplierCaptor.getValue().get().toString(), startsWith("Unable to refresh")); + assertThat(throwableCaptor.getValue().getMessage(), equalTo(mockProviderErrorMessage)); + } + } diff --git a/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/111_search_time_composite.yml b/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/111_search_time_composite.yml new file mode 100644 index 0000000000000..dc10d16c26ca3 --- /dev/null +++ b/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/111_search_time_composite.yml @@ -0,0 +1,36 @@ +--- +setup: + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 2 + number_of_replicas: 0 + - do: + bulk: + index: test + refresh: true + body: | + {"index":{}} + {"A":2} + +--- +"search-time composite across multiple shards": + - do: + search: + index: test + body: + query: + term: + "r.shouldReturn" : true + runtime_mappings: + r: + type: composite + fields: + shouldReturn: + type: boolean + script: + source: "emit('shouldReturn',true)" + + - match: {hits.total.value: 1} diff --git a/modules/transport-netty4/src/main/java/module-info.java b/modules/transport-netty4/src/main/java/module-info.java index cb718539d0f11..92217b419c666 100644 --- a/modules/transport-netty4/src/main/java/module-info.java +++ b/modules/transport-netty4/src/main/java/module-info.java @@ -7,6 +7,7 @@ */ module org.elasticsearch.transport.netty4 { + requires jdk.net; requires org.elasticsearch.base; requires org.elasticsearch.server; requires org.elasticsearch.xcontent; diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java index 7fa8ca28aa31b..5f49e2505cbf6 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java @@ -57,7 +57,6 @@ import org.elasticsearch.xcontent.NamedXContentRegistry; import java.net.InetSocketAddress; -import java.net.SocketOption; import java.util.concurrent.TimeUnit; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE; @@ -215,25 +214,22 @@ protected void doStart() { // Netty logs a warning if it can't set the option, so try this only on supported platforms if (IOUtils.LINUX || IOUtils.MAC_OS_X) { if (SETTING_HTTP_TCP_KEEP_IDLE.get(settings) >= 0) { - final SocketOption keepIdleOption = NetUtils.getTcpKeepIdleSocketOptionOrNull(); - if (keepIdleOption != null) { - serverBootstrap.childOption(NioChannelOption.of(keepIdleOption), SETTING_HTTP_TCP_KEEP_IDLE.get(settings)); - } + serverBootstrap.childOption( + NioChannelOption.of(NetUtils.getTcpKeepIdleSocketOption()), + SETTING_HTTP_TCP_KEEP_IDLE.get(settings) + ); } if (SETTING_HTTP_TCP_KEEP_INTERVAL.get(settings) >= 0) { - final SocketOption keepIntervalOption = NetUtils.getTcpKeepIntervalSocketOptionOrNull(); - if (keepIntervalOption != null) { - serverBootstrap.childOption( - NioChannelOption.of(keepIntervalOption), - SETTING_HTTP_TCP_KEEP_INTERVAL.get(settings) - ); - } + serverBootstrap.childOption( + 
NioChannelOption.of(NetUtils.getTcpKeepIntervalSocketOption()), + SETTING_HTTP_TCP_KEEP_INTERVAL.get(settings) + ); } if (SETTING_HTTP_TCP_KEEP_COUNT.get(settings) >= 0) { - final SocketOption keepCountOption = NetUtils.getTcpKeepCountSocketOptionOrNull(); - if (keepCountOption != null) { - serverBootstrap.childOption(NioChannelOption.of(keepCountOption), SETTING_HTTP_TCP_KEEP_COUNT.get(settings)); - } + serverBootstrap.childOption( + NioChannelOption.of(NetUtils.getTcpKeepCountSocketOption()), + SETTING_HTTP_TCP_KEEP_COUNT.get(settings) + ); } } } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/NetUtils.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/NetUtils.java index ffd423a7b092a..6c93b6036578d 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/NetUtils.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/NetUtils.java @@ -8,12 +8,15 @@ package org.elasticsearch.transport.netty4; +import jdk.net.ExtendedSocketOptions; + +import org.elasticsearch.core.SuppressForbidden; + import java.io.IOException; -import java.lang.reflect.Field; import java.net.SocketOption; import java.net.StandardSocketOptions; import java.nio.channels.NetworkChannel; -import java.util.Arrays; +import java.util.Objects; /** * Utilities for network-related methods. @@ -22,37 +25,31 @@ public class NetUtils { private NetUtils() {} + // Accessors to the extended socket options reduce the proliferation of the non-portable + // ExtendedSocketOptions type. + /** - * Returns the extended TCP_KEEPIDLE socket option, if available on this JDK + * Returns the extended TCP_KEEPIDLE socket option. */ - public static SocketOption getTcpKeepIdleSocketOptionOrNull() { - return getExtendedSocketOptionOrNull("TCP_KEEPIDLE"); + @SuppressForbidden(reason = "access to non-portable socket option required") + public static SocketOption getTcpKeepIdleSocketOption() { + return ExtendedSocketOptions.TCP_KEEPIDLE; } /** - * Returns the extended TCP_KEEPINTERVAL socket option, if available on this JDK + * Returns the extended TCP_KEEPINTERVAL socket option. */ - public static SocketOption getTcpKeepIntervalSocketOptionOrNull() { - return getExtendedSocketOptionOrNull("TCP_KEEPINTERVAL"); + @SuppressForbidden(reason = "access to non-portable socket option required") + public static SocketOption getTcpKeepIntervalSocketOption() { + return ExtendedSocketOptions.TCP_KEEPINTERVAL; } /** - * Returns the extended TCP_KEEPCOUNT socket option, if available on this JDK + * Returns the extended TCP_KEEPCOUNT socket option. 
*/ - public static SocketOption getTcpKeepCountSocketOptionOrNull() { - return getExtendedSocketOptionOrNull("TCP_KEEPCOUNT"); - } - - @SuppressWarnings("unchecked") - private static SocketOption getExtendedSocketOptionOrNull(String fieldName) { - try { - final Class extendedSocketOptionsClass = Class.forName("jdk.net.ExtendedSocketOptions"); - final Field field = extendedSocketOptionsClass.getField(fieldName); - return (SocketOption) field.get(null); - } catch (Exception t) { - // ignore - return null; - } + @SuppressForbidden(reason = "access to non-portable socket option required") + public static SocketOption getTcpKeepCountSocketOption() { + return ExtendedSocketOptions.TCP_KEEPCOUNT; } /** @@ -67,13 +64,9 @@ public static void tryEnsureReasonableKeepAliveConfig(NetworkChannel socketChann if (socketChannel.supportedOptions().contains(StandardSocketOptions.SO_KEEPALIVE)) { final Boolean keepalive = socketChannel.getOption(StandardSocketOptions.SO_KEEPALIVE); assert keepalive != null; - if (keepalive.booleanValue()) { - for (SocketOption option : Arrays.asList( - NetUtils.getTcpKeepIdleSocketOptionOrNull(), - NetUtils.getTcpKeepIntervalSocketOptionOrNull() - )) { - setMinValueForSocketOption(socketChannel, option, 300); - } + if (keepalive) { + setMinValueForSocketOption(socketChannel, getTcpKeepIdleSocketOption(), 300); + setMinValueForSocketOption(socketChannel, getTcpKeepIntervalSocketOption(), 300); } } } catch (Exception e) { @@ -84,7 +77,8 @@ public static void tryEnsureReasonableKeepAliveConfig(NetworkChannel socketChann } private static void setMinValueForSocketOption(NetworkChannel socketChannel, SocketOption option, int minValue) { - if (option != null && socketChannel.supportedOptions().contains(option)) { + Objects.requireNonNull(option); + if (socketChannel.supportedOptions().contains(option)) { try { final Integer currentIdleVal = socketChannel.getOption(option); assert currentIdleVal != null; diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java index 37c87d19e811f..0241669d15b8e 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java @@ -47,7 +47,6 @@ import java.io.IOException; import java.net.InetSocketAddress; -import java.net.SocketOption; import java.util.Map; import static org.elasticsearch.common.settings.Setting.byteSizeSetting; @@ -165,22 +164,19 @@ private Bootstrap createClientBootstrap(SharedGroupFactory.SharedGroup sharedGro if (TransportSettings.TCP_KEEP_ALIVE.get(settings)) { // Note that Netty logs a warning if it can't set the option if (TransportSettings.TCP_KEEP_IDLE.get(settings) >= 0) { - final SocketOption keepIdleOption = NetUtils.getTcpKeepIdleSocketOptionOrNull(); - if (keepIdleOption != null) { - bootstrap.option(NioChannelOption.of(keepIdleOption), TransportSettings.TCP_KEEP_IDLE.get(settings)); - } + bootstrap.option(NioChannelOption.of(NetUtils.getTcpKeepIdleSocketOption()), TransportSettings.TCP_KEEP_IDLE.get(settings)); } if (TransportSettings.TCP_KEEP_INTERVAL.get(settings) >= 0) { - final SocketOption keepIntervalOption = NetUtils.getTcpKeepIntervalSocketOptionOrNull(); - if (keepIntervalOption != null) { - bootstrap.option(NioChannelOption.of(keepIntervalOption), TransportSettings.TCP_KEEP_INTERVAL.get(settings)); - } + bootstrap.option( + 
NioChannelOption.of(NetUtils.getTcpKeepIntervalSocketOption()), + TransportSettings.TCP_KEEP_INTERVAL.get(settings) + ); } if (TransportSettings.TCP_KEEP_COUNT.get(settings) >= 0) { - final SocketOption keepCountOption = NetUtils.getTcpKeepCountSocketOptionOrNull(); - if (keepCountOption != null) { - bootstrap.option(NioChannelOption.of(keepCountOption), TransportSettings.TCP_KEEP_COUNT.get(settings)); - } + bootstrap.option( + NioChannelOption.of(NetUtils.getTcpKeepCountSocketOption()), + TransportSettings.TCP_KEEP_COUNT.get(settings) + ); } } @@ -236,23 +232,16 @@ private void createServerBootstrap(ProfileSettings profileSettings, SharedGroupF if (profileSettings.tcpKeepAlive) { // Note that Netty logs a warning if it can't set the option if (profileSettings.tcpKeepIdle >= 0) { - final SocketOption keepIdleOption = NetUtils.getTcpKeepIdleSocketOptionOrNull(); - if (keepIdleOption != null) { - serverBootstrap.childOption(NioChannelOption.of(keepIdleOption), profileSettings.tcpKeepIdle); - } + serverBootstrap.childOption(NioChannelOption.of(NetUtils.getTcpKeepIdleSocketOption()), profileSettings.tcpKeepIdle); } if (profileSettings.tcpKeepInterval >= 0) { - final SocketOption keepIntervalOption = NetUtils.getTcpKeepIntervalSocketOptionOrNull(); - if (keepIntervalOption != null) { - serverBootstrap.childOption(NioChannelOption.of(keepIntervalOption), profileSettings.tcpKeepInterval); - } - + serverBootstrap.childOption( + NioChannelOption.of(NetUtils.getTcpKeepIntervalSocketOption()), + profileSettings.tcpKeepInterval + ); } if (profileSettings.tcpKeepCount >= 0) { - final SocketOption keepCountOption = NetUtils.getTcpKeepCountSocketOptionOrNull(); - if (keepCountOption != null) { - serverBootstrap.childOption(NioChannelOption.of(keepCountOption), profileSettings.tcpKeepCount); - } + serverBootstrap.childOption(NioChannelOption.of(NetUtils.getTcpKeepCountSocketOption()), profileSettings.tcpKeepCount); } } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/NetUtilsTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/NetUtilsTests.java index 6cea1296f2e7a..1a4e7b3fc1565 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/NetUtilsTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/NetUtilsTests.java @@ -8,17 +8,38 @@ package org.elasticsearch.transport.netty4; -import org.apache.lucene.util.Constants; import org.elasticsearch.core.IOUtils; import org.elasticsearch.test.ESTestCase; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.channels.NetworkChannel; +import java.nio.channels.SocketChannel; + +import static org.hamcrest.Matchers.hasItem; + public class NetUtilsTests extends ESTestCase { - public void testExtendedSocketOptions() { - assumeTrue("JDK possibly not supported", Constants.JVM_NAME.contains("HotSpot") || Constants.JVM_NAME.contains("OpenJDK")); + public void testExtendedSocketOptions() throws IOException { + assertTrue( + "jdk.net module not resolved", + ModuleLayer.boot().modules().stream().map(Module::getName).anyMatch(nm -> nm.equals("jdk.net")) + ); + assumeTrue("Platform possibly not supported", IOUtils.LINUX || IOUtils.MAC_OS_X); - assertNotNull(NetUtils.getTcpKeepIdleSocketOptionOrNull()); - assertNotNull(NetUtils.getTcpKeepIntervalSocketOptionOrNull()); - assertNotNull(NetUtils.getTcpKeepCountSocketOptionOrNull()); + try (var channel = networkChannel()) { + var options = channel.supportedOptions(); 
+ assertThat(options, hasItem(NetUtils.getTcpKeepIdleSocketOption())); + assertThat(options, hasItem(NetUtils.getTcpKeepIntervalSocketOption())); + assertThat(options, hasItem(NetUtils.getTcpKeepCountSocketOption())); + } + } + + private static NetworkChannel networkChannel() { + try { + return SocketChannel.open(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } } } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java index 253758e378856..91a31f19f0e3b 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java @@ -125,12 +125,12 @@ private void checkDefaultKeepAliveOptions(TcpChannel channel) throws IOException assertThat(nettyChannel.getNettyChannel(), instanceOf(Netty4NioSocketChannel.class)); Netty4NioSocketChannel netty4NioSocketChannel = (Netty4NioSocketChannel) nettyChannel.getNettyChannel(); SocketChannel socketChannel = netty4NioSocketChannel.javaChannel(); - assertThat(socketChannel.supportedOptions(), hasItem(NetUtils.getTcpKeepIdleSocketOptionOrNull())); - Integer keepIdle = socketChannel.getOption(NetUtils.getTcpKeepIdleSocketOptionOrNull()); + assertThat(socketChannel.supportedOptions(), hasItem(NetUtils.getTcpKeepIdleSocketOption())); + Integer keepIdle = socketChannel.getOption(NetUtils.getTcpKeepIdleSocketOption()); assertNotNull(keepIdle); assertThat(keepIdle, lessThanOrEqualTo(500)); - assertThat(socketChannel.supportedOptions(), hasItem(NetUtils.getTcpKeepIntervalSocketOptionOrNull())); - Integer keepInterval = socketChannel.getOption(NetUtils.getTcpKeepIntervalSocketOptionOrNull()); + assertThat(socketChannel.supportedOptions(), hasItem(NetUtils.getTcpKeepIntervalSocketOption())); + Integer keepInterval = socketChannel.getOption(NetUtils.getTcpKeepIntervalSocketOption()); assertNotNull(keepInterval); assertThat(keepInterval, lessThanOrEqualTo(500)); } diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index ceea8ba4f6855..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -501aa4f0028424a994b06627f30ffb36150ffbe2 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..df4ae8d72dd2b --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0.jar.sha1 @@ -0,0 +1 @@ +11dd9be0448fe594cf918f5260e193b3ab4e07a0 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 929be5cd0d86f..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a4a84f37391ab5da0697ba6344555b633aa4bacd \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0.jar.sha1 
b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..675bf726d2a65 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0.jar.sha1 @@ -0,0 +1 @@ +87c1357612f2f483174d1a63ea8c6680a1696bac \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 60c85b324c183..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -012f177949d83aa7bdf26c309f5569f67d1c65b5 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..8987f89c913df --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0.jar.sha1 @@ -0,0 +1 @@ +5d032dbeb3f4015741336a877dd4b0e62099246c \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 782b48c8fd4df..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -792f50d6cd8b75c277c514f2f6e9914572942dfe \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..00d66c733c548 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0.jar.sha1 @@ -0,0 +1 @@ +fe6ac8772b545e0abd0c755cd4bd07caad58edb9 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 4b9ceb4a4581f..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b1488267195c87749dcc42de6b2f665d24ff8d9e \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..0c521b5f5ef6a --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0.jar.sha1 @@ -0,0 +1 @@ +288726e13b598c341e81aef8b5c9ce53f51889d0 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 45ccdbf538570..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c78968b087eaf2a95ed3b67540efc32455bab84d \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..ba98dd7e06f71 --- /dev/null +++ 
b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0.jar.sha1 @@ -0,0 +1 @@ +166d02f7f98f18c6607335030a404fcad8f57cd6 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 91d9d4c9452b2..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -978ee14dad7edab6384d04655ce1db219547b6d8 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..88ac9a13e8ce3 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0.jar.sha1 @@ -0,0 +1 @@ +3c0e4177aa87a4be2826a360f656f3559ea3f997 \ No newline at end of file diff --git a/plugins/examples/gradle/wrapper/gradle-wrapper.properties b/plugins/examples/gradle/wrapper/gradle-wrapper.properties index b871071c412e2..e939ec976751d 100644 --- a/plugins/examples/gradle/wrapper/gradle-wrapper.properties +++ b/plugins/examples/gradle/wrapper/gradle-wrapper.properties @@ -1,6 +1,6 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-7.5-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-7.5.1-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=97a52d145762adc241bad7fd18289bf7f6801e08ece6badf80402fe2b9f250b1 +distributionSha256Sum=db9c8211ed63f61f60292c69e80d89196f9eb36665e369e7f00ac4cc841c2219 diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestClusterInfoActionCancellationIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestClusterInfoActionCancellationIT.java index 9db7c6893c433..0594191ed3c8f 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestClusterInfoActionCancellationIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestClusterInfoActionCancellationIT.java @@ -26,6 +26,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.junit.annotations.TestLogging; import java.util.EnumSet; import java.util.concurrent.CancellationException; @@ -38,6 +39,7 @@ import static org.hamcrest.core.IsEqual.equalTo; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0) +@TestLogging(value = "org.elasticsearch.tasks.TaskManager:TRACE,org.elasticsearch.test.TaskAssertions:TRACE", reason = "debugging") public class RestClusterInfoActionCancellationIT extends HttpSmokeTestCase { public void testGetMappingsCancellation() throws Exception { diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml index f2c5d635c1cc6..0ffec61788a77 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml @@ -160,7 +160,11 @@ setup: --- "kNN search in _knn_search endpoint": + - 
skip: + features: ["allowed_warnings"] - do: + allowed_warnings: + - "The kNN search API has been replaced by the `knn` option in the search API." knn_search: index: test body: @@ -182,7 +186,10 @@ setup: - skip: version: ' - 8.1.99' reason: 'kNN with filtering added in 8.2' + features: ["allowed_warnings"] - do: + allowed_warnings: + - "The kNN search API has been replaced by the `knn` option in the search API." knn_search: index: test body: @@ -201,6 +208,8 @@ setup: - match: {hits.hits.0.fields.name.0: "rabbit.jpg"} - do: + allowed_warnings: + - "The kNN search API has been replaced by the `knn` option in the search API." knn_search: index: test body: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/50_dense_vector_field_usage.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/50_dense_vector_field_usage.yml index c3ce4e7bd5f76..854543f7b2144 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/50_dense_vector_field_usage.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/50_dense_vector_field_usage.yml @@ -24,6 +24,7 @@ setup: - do: index: index: futest + id: "1" body: name: cow.jpg vector: [ 230.0, 300.33, -34.8988, 15.555, -200.0 ] @@ -52,7 +53,10 @@ setup: - skip: version: ' - 8.0.99' reason: 'dense_vector field usage was added in 8.1' + features: ["allowed_warnings"] - do: + allowed_warnings: + - "The kNN search API has been replaced by the `knn` option in the search API." knn_search: index: futest body: @@ -63,12 +67,6 @@ setup: k: 2 num_candidates: 3 - - match: {hits.hits.0._id: "2"} - - match: {hits.hits.0.fields.name.0: "moose.jpg"} - - - match: {hits.hits.1._id: "3"} - - match: {hits.hits.1.fields.name.0: "rabbit.jpg"} - - do: indices.field_usage_stats: { index: futest } diff --git a/server/licenses/lucene-analysis-common-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/server/licenses/lucene-analysis-common-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 6782780d6cbd4..0000000000000 --- a/server/licenses/lucene-analysis-common-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7a154a194ea505d27b538270ee2db2b5a4a38371 \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.3.0.jar.sha1 b/server/licenses/lucene-analysis-common-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..2e260eb028f4c --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.3.0.jar.sha1 @@ -0,0 +1 @@ +03496708a19a8a55a0dc4f61f8aa2febc6e8977c \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/server/licenses/lucene-backward-codecs-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 220d2b83dacd9..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3ad36d2a32c1dda37040cdfed9dcdf294b8f3b7c \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.3.0.jar.sha1 b/server/licenses/lucene-backward-codecs-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..1dda17ee92fdb --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.3.0.jar.sha1 @@ -0,0 +1 @@ +95ea01ee0d1e543e18e3cf58d8a6a27a587a7239 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/server/licenses/lucene-core-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 1acc580cf4a7e..0000000000000 --- 
a/server/licenses/lucene-core-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e41aa9fe38033e61da13fe420aa6e9400f467dd8 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.3.0.jar.sha1 b/server/licenses/lucene-core-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..fd870008c5bd4 --- /dev/null +++ b/server/licenses/lucene-core-9.3.0.jar.sha1 @@ -0,0 +1 @@ +a030180999bc3f1a65f23f53b38098ca9daeee79 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/server/licenses/lucene-grouping-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 25f07a5af5a69..0000000000000 --- a/server/licenses/lucene-grouping-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -71cd063e306af5acf1cef0492eebbbf000e6a6ce \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.3.0.jar.sha1 b/server/licenses/lucene-grouping-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..6f63ca177d3c3 --- /dev/null +++ b/server/licenses/lucene-grouping-9.3.0.jar.sha1 @@ -0,0 +1 @@ +883071196e53ec93d2a53dcc8211ee30be6c00dc \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/server/licenses/lucene-highlighter-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 263f6dd6b208e..0000000000000 --- a/server/licenses/lucene-highlighter-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -adc913180fac1b221f57288661f069cb7a240127 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.3.0.jar.sha1 b/server/licenses/lucene-highlighter-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..78264d8ee3713 --- /dev/null +++ b/server/licenses/lucene-highlighter-9.3.0.jar.sha1 @@ -0,0 +1 @@ +7e895c49b9991ea2ec08855c425b9eae44a08764 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/server/licenses/lucene-join-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 638bf0f37a91f..0000000000000 --- a/server/licenses/lucene-join-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3618de63e62d734ab1892ff446ae4f5ef866bee6 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.3.0.jar.sha1 b/server/licenses/lucene-join-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..5e641f5f01075 --- /dev/null +++ b/server/licenses/lucene-join-9.3.0.jar.sha1 @@ -0,0 +1 @@ +04baaae4ce4a35ae919150dd17cd1e63b0da9d24 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/server/licenses/lucene-memory-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 3ca4420b49396..0000000000000 --- a/server/licenses/lucene-memory-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -07e0de548fc392428545db40192280b4f83daf4f \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.3.0.jar.sha1 b/server/licenses/lucene-memory-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..c8e86c7674ede --- /dev/null +++ b/server/licenses/lucene-memory-9.3.0.jar.sha1 @@ -0,0 +1 @@ +1a2203b332edc1366b9789f5286296e109dbc8c4 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/server/licenses/lucene-misc-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 09ecd52494738..0000000000000 --- a/server/licenses/lucene-misc-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-ab308291a7dd5ec9988a229dc8e7c27fc2bb5409 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.3.0.jar.sha1 b/server/licenses/lucene-misc-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..11a459a9f52ba --- /dev/null +++ b/server/licenses/lucene-misc-9.3.0.jar.sha1 @@ -0,0 +1 @@ +61b502c9557247b6803a346c0bab20c9dc89d125 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/server/licenses/lucene-queries-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 8d2959d64aac3..0000000000000 --- a/server/licenses/lucene-queries-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8b5804be2c87d995c5255ff1ad739052fc243661 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.3.0.jar.sha1 b/server/licenses/lucene-queries-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..2b577bd33b46a --- /dev/null +++ b/server/licenses/lucene-queries-9.3.0.jar.sha1 @@ -0,0 +1 @@ +d8fe3bce3c05015c5fdb78279f36b9f1a75b98d8 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/server/licenses/lucene-queryparser-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 0be8f71b787a2..0000000000000 --- a/server/licenses/lucene-queryparser-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a55d8a68cccaaf4af5a973c4332519d3eb477068 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.3.0.jar.sha1 b/server/licenses/lucene-queryparser-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..b106860bf9f3e --- /dev/null +++ b/server/licenses/lucene-queryparser-9.3.0.jar.sha1 @@ -0,0 +1 @@ +78f259a66d48f77a2d2b96a0a858efa08eba72dc \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/server/licenses/lucene-sandbox-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index da0e369ccba29..0000000000000 --- a/server/licenses/lucene-sandbox-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0a6005b6b9b09b1da1c3c74558693824f429e55d \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.3.0.jar.sha1 b/server/licenses/lucene-sandbox-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..82c2c6d85ca4c --- /dev/null +++ b/server/licenses/lucene-sandbox-9.3.0.jar.sha1 @@ -0,0 +1 @@ +5ee318cf8e9a70c2c99e03e157465316a3d4a17a \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/server/licenses/lucene-suggest-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index 38a6a7ca5e787..0000000000000 --- a/server/licenses/lucene-suggest-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d4c5418c469be74cc5df3427ac07386598c18882 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.3.0.jar.sha1 b/server/licenses/lucene-suggest-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..71a263aa163f8 --- /dev/null +++ b/server/licenses/lucene-suggest-9.3.0.jar.sha1 @@ -0,0 +1 @@ +fb5d7243ba67616edbda1ecf421c615dd595752d \ No newline at end of file diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceIT.java new file mode 100644 index 0000000000000..66346aae64dca --- /dev/null +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceIT.java @@ -0,0 +1,98 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.coordination; + +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.disruption.BlockClusterStateProcessing; +import org.elasticsearch.threadpool.Scheduler; +import org.junit.Before; + +import java.util.List; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.emptyOrNullString; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) +public class CoordinationDiagnosticsServiceIT extends ESIntegTestCase { + @Before + private void setBootstrapMasterNodeIndex() { + internalCluster().setBootstrapMasterNodeIndex(0); + } + + public void testBlockClusterStateProcessingOnOneNode() throws Exception { + /* + * This test picks a node that is not elected master, and then blocks cluster state processing on it. The reason is so that we + * can call CoordinationDiagnosticsService#beginPollingClusterFormationInfo without a cluster changed event resulting in the + * values we pass in being overwritten. 
+ */ + final List nodeNames = internalCluster().startNodes(3); + + final String master = internalCluster().getMasterName(); + assertThat(nodeNames, hasItem(master)); + String blockedNode = nodeNames.stream().filter(n -> n.equals(master) == false).findAny().get(); + assertNotNull(blockedNode); + ensureStableCluster(3); + + DiscoveryNodes discoveryNodes = internalCluster().getInstance(ClusterService.class, master).state().nodes(); + Set nodesWithoutBlockedNode = discoveryNodes.getNodes() + .values() + .stream() + .filter(n -> n.getName().equals(blockedNode) == false) + .collect(Collectors.toSet()); + + BlockClusterStateProcessing disruption = new BlockClusterStateProcessing(blockedNode, random()); + internalCluster().setDisruptionScheme(disruption); + // stop processing cluster state changes + disruption.startDisrupting(); + + CoordinationDiagnosticsService diagnosticsOnBlockedNode = internalCluster().getInstance( + CoordinationDiagnosticsService.class, + blockedNode + ); + ConcurrentMap nodeToClusterFormationStateMap = + new ConcurrentHashMap<>(); + ConcurrentHashMap cancellables = new ConcurrentHashMap<>(); + diagnosticsOnBlockedNode.clusterFormationResponses = nodeToClusterFormationStateMap; + diagnosticsOnBlockedNode.clusterFormationInfoTasks = cancellables; + + diagnosticsOnBlockedNode.remoteRequestInitialDelay = TimeValue.ZERO; + diagnosticsOnBlockedNode.beginPollingClusterFormationInfo( + nodesWithoutBlockedNode, + nodeToClusterFormationStateMap::put, + cancellables + ); + + // while the node is blocked from processing cluster state changes it should reach out to the other 2 + // master eligible nodes and get a successful response + assertBusy(() -> { + assertThat(cancellables.size(), is(2)); + assertThat(nodeToClusterFormationStateMap.size(), is(2)); + nodesWithoutBlockedNode.forEach(node -> { + CoordinationDiagnosticsService.ClusterFormationStateOrException result = nodeToClusterFormationStateMap.get(node); + assertNotNull(result); + assertNotNull(result.clusterFormationState()); + assertNull(result.exception()); + ClusterFormationFailureHelper.ClusterFormationState clusterFormationState = result.clusterFormationState(); + assertThat(clusterFormationState.getDescription(), not(emptyOrNullString())); + }); + }); + + disruption.stopDisrupting(); + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java index 3e27406c1e594..f4db7e7cca855 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java @@ -426,28 +426,24 @@ public void testRepeatedNullMasterRecognizedAsGreenIfMasterDoesNotKnowItIsUnstab .put(CoordinationDiagnosticsService.NO_MASTER_TRANSITIONS_THRESHOLD_SETTING.getKey(), 1) .build() ); + int nullTransitionsThreshold = 1; final List dataNodes = internalCluster().startDataOnlyNodes( 2, Settings.builder() .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s") .put(Coordinator.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") - .put(CoordinationDiagnosticsService.NO_MASTER_TRANSITIONS_THRESHOLD_SETTING.getKey(), 1) + .put(CoordinationDiagnosticsService.NO_MASTER_TRANSITIONS_THRESHOLD_SETTING.getKey(), nullTransitionsThreshold) .put(CoordinationDiagnosticsService.NODE_HAS_MASTER_LOOKUP_TIMEFRAME_SETTING.getKey(), new TimeValue(60, TimeUnit.SECONDS)) .build() ); ensureStableCluster(3); - 
for (int i = 0; i < 2; i++) { + for (int i = 0; i < nullTransitionsThreshold + 1; i++) { final String masterNode = masterNodes.get(0); // Simulating a painful gc by suspending all threads for a long time on the current elected master node. SingleNodeDisruption masterNodeDisruption = new LongGCDisruption(random(), masterNode); final CountDownLatch dataNodeMasterSteppedDown = new CountDownLatch(2); - internalCluster().getInstance(ClusterService.class, masterNode).addListener(event -> { - if (event.state().nodes().getMasterNodeId() == null) { - dataNodeMasterSteppedDown.countDown(); - } - }); internalCluster().getInstance(ClusterService.class, dataNodes.get(0)).addListener(event -> { if (event.state().nodes().getMasterNodeId() == null) { dataNodeMasterSteppedDown.countDown(); @@ -466,7 +462,7 @@ public void testRepeatedNullMasterRecognizedAsGreenIfMasterDoesNotKnowItIsUnstab // Stop disruption logger.info("--> unfreezing node [{}]", masterNode); masterNodeDisruption.stopDisrupting(); - ensureStableCluster(3); + ensureStableCluster(3, TimeValue.timeValueSeconds(30), false, randomFrom(dataNodes)); } assertGreenMasterStability(internalCluster().client(randomFrom(dataNodes))); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java index a847c6e848e29..20016c546d622 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java @@ -9,6 +9,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.indices.close.CloseIndexRequestBuilder; import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; import org.elasticsearch.action.support.ActiveShardCount; @@ -20,9 +21,11 @@ import org.elasticsearch.cluster.metadata.MetadataIndexStateService; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; @@ -55,6 +58,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -187,7 +191,22 @@ public void testConcurrentClose() throws InterruptedException { .mapToObj(i -> client().prepareIndex(indexName).setId(String.valueOf(i)).setSource("num", i)) .collect(toList()) ); - ensureYellowAndNoInitializingShards(indexName); + + ClusterHealthResponse healthResponse = client().admin() + .cluster() + .prepareHealth(indexName) + .setWaitForYellowStatus() + .setWaitForEvents(Priority.LANGUID) + .setWaitForNoRelocatingShards(true) + .setWaitForNoInitializingShards(true) + .setWaitForNodes(Integer.toString(cluster().size())) + .setTimeout(TimeValue.timeValueSeconds(60L)) + .get(); + if 
(healthResponse.isTimedOut()) { + logClusterState(); + } + assertThat(healthResponse.isTimedOut(), equalTo(false)); + assertThat(healthResponse.getIndices().get(indexName).getStatus().value(), lessThanOrEqualTo(ClusterHealthStatus.YELLOW.value())); final CountDownLatch startClosing = new CountDownLatch(1); final Thread[] threads = new Thread[randomIntBetween(2, 5)]; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java index 85e188b30c1a5..64760ee8c154c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java @@ -14,6 +14,8 @@ import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.fieldcaps.FieldCapabilities; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.ClusterState; @@ -39,6 +41,7 @@ import java.util.Collections; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; @@ -1009,4 +1012,88 @@ public void testPartitionedTemplate() throws Exception { GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings("test_good").get(); assertEquals("6", getSettingsResponse.getIndexToSettings().get("test_good").get("index.routing_partition_size")); } + + public void testIndexTemplatesWithSameSubfield() { + client().admin() + .indices() + .preparePutTemplate("template_1") + .setPatterns(Collections.singletonList("te*")) + .setSettings(indexSettings()) + .setOrder(100) + .setMapping(""" + { + "_doc": { + "properties": { + "kwm": { + "properties": { + "source": { + "properties": { + "geo": { + "properties": { + "location": { + "type": "geo_point" + } + } + } + } + } + } + }, + "source": { + "properties": { + "geo": { + "properties": { + "location": { + "type": "geo_point" + } + } + } + } + } + } + } + } + """, XContentType.JSON) + .get(); + + client().admin() + .indices() + .preparePutTemplate("template_2") + .setPatterns(Collections.singletonList("test*")) + .setSettings(indexSettings()) + .setOrder(1) + .setMapping(""" + { + "_doc": { + "properties": { + "kwm.source.geo": { + "properties": { + "location": { + "type": "geo_point" + } + } + } + } + } + } + """, XContentType.JSON) + .get(); + + client().prepareIndex("test").setSource().get(); + FieldCapabilitiesResponse fieldCapabilitiesResponse = client().prepareFieldCaps("test").setFields("*location").get(); + { + Map field = fieldCapabilitiesResponse.getField("kwm.source.geo.location"); + assertNotNull(field); + FieldCapabilities fieldCapabilities = field.get("geo_point"); + assertTrue(fieldCapabilities.isSearchable()); + assertTrue(fieldCapabilities.isAggregatable()); + } + { + Map field = fieldCapabilitiesResponse.getField("source.geo.location"); + assertNotNull(field); + FieldCapabilities fieldCapabilities = field.get("geo_point"); + assertTrue(fieldCapabilities.isSearchable()); + assertTrue(fieldCapabilities.isAggregatable()); 
+ } + } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java index f4a9d2993d188..cfde48d088db9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java @@ -20,11 +20,14 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Strings; +import org.elasticsearch.core.Tuple; import org.elasticsearch.reservedstate.action.ReservedClusterSettingsAction; import org.elasticsearch.test.ESIntegTestCase; import java.nio.charset.StandardCharsets; import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; @@ -33,7 +36,6 @@ import static org.elasticsearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING; import static org.elasticsearch.test.NodeRoles.dataOnlyNode; import static org.hamcrest.Matchers.allOf; -import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; @@ -43,7 +45,7 @@ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) public class FileSettingsServiceIT extends ESIntegTestCase { - private AtomicLong versionCounter = new AtomicLong(1); + private static AtomicLong versionCounter = new AtomicLong(1); private static String testJSON = """ { @@ -84,36 +86,42 @@ private void writeJSONFile(String node, String json) throws Exception { FileSettingsService fileSettingsService = internalCluster().getInstance(FileSettingsService.class, node); Files.createDirectories(fileSettingsService.operatorSettingsDir()); - Files.write(fileSettingsService.operatorSettingsFile(), Strings.format(json, version).getBytes(StandardCharsets.UTF_8)); + Path tempFilePath = createTempFile(); + + Files.write(tempFilePath, Strings.format(json, version).getBytes(StandardCharsets.UTF_8)); + Files.move(tempFilePath, fileSettingsService.operatorSettingsFile(), StandardCopyOption.ATOMIC_MOVE); } - private CountDownLatch setupClusterStateListener(String node) { + private Tuple setupClusterStateListener(String node) { ClusterService clusterService = internalCluster().clusterService(node); CountDownLatch savedClusterState = new CountDownLatch(1); + AtomicLong metadataVersion = new AtomicLong(-1); clusterService.addListener(new ClusterStateListener() { @Override public void clusterChanged(ClusterChangedEvent event) { ReservedStateMetadata reservedState = event.state().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE); if (reservedState != null) { ReservedStateHandlerMetadata handlerMetadata = reservedState.handlers().get(ReservedClusterSettingsAction.NAME); - if (handlerMetadata == null) { - fail("Should've found cluster settings in this metadata"); + if (handlerMetadata != null && handlerMetadata.keys().contains("indices.recovery.max_bytes_per_sec")) { + clusterService.removeListener(this); + metadataVersion.set(event.state().metadata().version()); + savedClusterState.countDown(); } - 
assertThat(handlerMetadata.keys(), contains("indices.recovery.max_bytes_per_sec")); - clusterService.removeListener(this); - savedClusterState.countDown(); } } }); - return savedClusterState; + return new Tuple<>(savedClusterState, metadataVersion); } - private void assertClusterStateSaveOK(CountDownLatch savedClusterState) throws Exception { + private void assertClusterStateSaveOK(CountDownLatch savedClusterState, AtomicLong metadataVersion) throws Exception { boolean awaitSuccessful = savedClusterState.await(20, TimeUnit.SECONDS); assertTrue(awaitSuccessful); - final ClusterStateResponse clusterStateResponse = client().admin().cluster().state(new ClusterStateRequest()).actionGet(); + final ClusterStateResponse clusterStateResponse = client().admin() + .cluster() + .state(new ClusterStateRequest().waitForMetadataVersion(metadataVersion.get())) + .actionGet(); assertThat( clusterStateResponse.getState().metadata().persistentSettings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), @@ -150,7 +158,7 @@ public void testSettingsApplied() throws Exception { assertFalse(dataFileSettingsService.watching()); writeJSONFile(masterNode, testJSON); - assertClusterStateSaveOK(savedClusterState); + assertClusterStateSaveOK(savedClusterState.v1(), savedClusterState.v2()); } public void testSettingsAppliedOnStart() throws Exception { @@ -175,17 +183,18 @@ public void testSettingsAppliedOnStart() throws Exception { assertTrue(masterFileSettingsService.watching()); assertFalse(dataFileSettingsService.watching()); - assertClusterStateSaveOK(savedClusterState); + assertClusterStateSaveOK(savedClusterState.v1(), savedClusterState.v2()); } - private CountDownLatch setupClusterStateListenerForError(String node) { + private Tuple setupClusterStateListenerForError(String node) { ClusterService clusterService = internalCluster().clusterService(node); CountDownLatch savedClusterState = new CountDownLatch(1); + AtomicLong metadataVersion = new AtomicLong(-1); clusterService.addListener(new ClusterStateListener() { @Override public void clusterChanged(ClusterChangedEvent event) { ReservedStateMetadata reservedState = event.state().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE); - if (reservedState != null) { + if (reservedState != null && reservedState.errorMetadata() != null) { assertEquals(ReservedStateErrorMetadata.ErrorKind.PARSING, reservedState.errorMetadata().errorKind()); assertThat(reservedState.errorMetadata().errors(), allOf(notNullValue(), hasSize(1))); assertThat( @@ -193,19 +202,23 @@ public void clusterChanged(ClusterChangedEvent event) { containsString("Missing handler definition for content key [not_cluster_settings]") ); clusterService.removeListener(this); + metadataVersion.set(event.state().metadata().version()); savedClusterState.countDown(); } } }); - return savedClusterState; + return new Tuple<>(savedClusterState, metadataVersion); } - private void assertClusterStateNotSaved(CountDownLatch savedClusterState) throws Exception { + private void assertClusterStateNotSaved(CountDownLatch savedClusterState, AtomicLong metadataVersion) throws Exception { boolean awaitSuccessful = savedClusterState.await(20, TimeUnit.SECONDS); assertTrue(awaitSuccessful); - final ClusterStateResponse clusterStateResponse = client().admin().cluster().state(new ClusterStateRequest()).actionGet(); + final ClusterStateResponse clusterStateResponse = client().admin() + .cluster() + .state(new ClusterStateRequest().waitForMetadataVersion(metadataVersion.get())) + .actionGet(); 
assertThat(clusterStateResponse.getState().metadata().persistentSettings().get("search.allow_expensive_queries"), nullValue()); @@ -235,6 +248,6 @@ public void testErrorSaved() throws Exception { assertFalse(dataFileSettingsService.watching()); writeJSONFile(masterNode, testErrorJSON); - assertClusterStateNotSaved(savedClusterState); + assertClusterStateNotSaved(savedClusterState.v1(), savedClusterState.v2()); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnaphotsAndFileSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnaphotsAndFileSettingsIT.java new file mode 100644 index 0000000000000..4432846ee5be8 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnaphotsAndFileSettingsIT.java @@ -0,0 +1,340 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.reservedstate.service; + +import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsAction; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.InternalClusterInfoService; +import org.elasticsearch.cluster.metadata.ReservedStateHandlerMetadata; +import org.elasticsearch.cluster.metadata.ReservedStateMetadata; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Strings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.reservedstate.action.ReservedClusterSettingsAction; +import org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase; +import org.elasticsearch.snapshots.SnapshotState; +import org.junit.After; + +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; + +import static org.elasticsearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; + +/** + * Tests that snapshot restore behaves correctly when we have file based settings that reserve part of the + * cluster state + */ +public class SnaphotsAndFileSettingsIT extends AbstractSnapshotIntegTestCase { + private static AtomicLong versionCounter = new AtomicLong(1); + + private static String testFileSettingsJSON = """ + { + "metadata": { + "version": "%s", + "compatibility": "8.4.0" + }, + "state": { + "cluster_settings": { + "indices.recovery.max_bytes_per_sec": "50mb" + } + } + }"""; + + private static String emptyFileSettingsJSON = """ + { + "metadata": { + "version": "%s", + "compatibility": "8.4.0" + }, + "state": { + "cluster_settings": {} + } + }"""; + + @After + public void cleanUp() throws Exception { + awaitNoMoreRunningOperations(); + } + + 
private void writeJSONFile(String node, String json) throws Exception { + long version = versionCounter.incrementAndGet(); + + FileSettingsService fileSettingsService = internalCluster().getInstance(FileSettingsService.class, node); + + Files.createDirectories(fileSettingsService.operatorSettingsDir()); + Path tempFilePath = createTempFile(); + + Files.write(tempFilePath, Strings.format(json, version).getBytes(StandardCharsets.UTF_8)); + Files.move(tempFilePath, fileSettingsService.operatorSettingsFile(), StandardCopyOption.ATOMIC_MOVE); + } + + private Tuple setupClusterStateListener(String node) { + ClusterService clusterService = internalCluster().clusterService(node); + CountDownLatch savedClusterState = new CountDownLatch(1); + AtomicLong metadataVersion = new AtomicLong(-1); + clusterService.addListener(new ClusterStateListener() { + @Override + public void clusterChanged(ClusterChangedEvent event) { + ReservedStateMetadata reservedState = event.state().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE); + if (reservedState != null && reservedState.version() != 0L) { + ReservedStateHandlerMetadata handlerMetadata = reservedState.handlers().get(ReservedClusterSettingsAction.NAME); + if (handlerMetadata == null) { + fail("Should've found cluster settings in this metadata"); + } + if (handlerMetadata.keys().contains("indices.recovery.max_bytes_per_sec")) { + clusterService.removeListener(this); + metadataVersion.set(event.state().metadata().version()); + savedClusterState.countDown(); + } + } + } + }); + + return new Tuple<>(savedClusterState, metadataVersion); + } + + private ClusterStateResponse assertClusterStateSaveOK(CountDownLatch savedClusterState, AtomicLong metadataVersion) throws Exception { + boolean awaitSuccessful = savedClusterState.await(20, TimeUnit.SECONDS); + assertTrue(awaitSuccessful); + + return clusterAdmin().state(new ClusterStateRequest().waitForMetadataVersion(metadataVersion.get())).actionGet(); + } + + public void testRestoreWithRemovedFileSettings() throws Exception { + try { + createRepository("test-repo", "fs"); + + logger.info("--> set some persistent cluster settings"); + assertAcked( + clusterAdmin().prepareUpdateSettings() + .setPersistentSettings( + Settings.builder() + .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), TimeValue.timeValueSeconds(25)) + .build() + ) + ); + + ensureGreen(); + + String masterNode = internalCluster().getMasterName(); + + var savedClusterState = setupClusterStateListener(masterNode); + FileSettingsService fs = internalCluster().getInstance(FileSettingsService.class, masterNode); + + logger.info("--> write some file based settings, putting some reserved state"); + writeJSONFile(masterNode, testFileSettingsJSON); + final ClusterStateResponse savedStateResponse = assertClusterStateSaveOK(savedClusterState.v1(), savedClusterState.v2()); + assertThat( + savedStateResponse.getState().metadata().persistentSettings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), + equalTo("50mb") + ); + + logger.info("--> create full snapshot"); + createFullSnapshot("test-repo", "test-snap"); + assertThat(getSnapshot("test-repo", "test-snap").state(), equalTo(SnapshotState.SUCCESS)); + + assertAcked( + clusterAdmin().prepareUpdateSettings() + .setPersistentSettings( + Settings.builder() + .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), TimeValue.timeValueSeconds(55)) + .build() + ) + ); + + logger.info("--> deleting operator file, no file based settings"); 
+ Files.delete(fs.operatorSettingsFile()); + + logger.info("--> restore global state from the snapshot"); + clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap").setRestoreGlobalState(true).setWaitForCompletion(true).get(); + + ensureGreen(); + + final ClusterStateResponse clusterStateResponse = clusterAdmin().state(new ClusterStateRequest().metadata(true)).actionGet(); + + // We expect no reserved metadata state for file based settings, the operator file was deleted. + assertNull(clusterStateResponse.getState().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE)); + + final ClusterGetSettingsAction.Response getSettingsResponse = clusterAdmin().execute( + ClusterGetSettingsAction.INSTANCE, + new ClusterGetSettingsAction.Request() + ).actionGet(); + + assertThat( + getSettingsResponse.persistentSettings().get(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey()), + equalTo("25s") + ); + // We didn't remove the setting set by file settings, we simply removed the reserved (operator) section. + assertThat(getSettingsResponse.persistentSettings().get("indices.recovery.max_bytes_per_sec"), equalTo("50mb")); + } finally { + // cleanup + assertAcked( + clusterAdmin().prepareUpdateSettings() + .setPersistentSettings( + Settings.builder() + .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), (String) null) + .put("indices.recovery.max_bytes_per_sec", (String) null) + .build() + ) + ); + } + } + + private Tuple removedReservedClusterStateListener(String node) { + ClusterService clusterService = internalCluster().clusterService(node); + CountDownLatch savedClusterState = new CountDownLatch(1); + AtomicLong metadataVersion = new AtomicLong(-1); + clusterService.addListener(new ClusterStateListener() { + @Override + public void clusterChanged(ClusterChangedEvent event) { + ReservedStateMetadata reservedState = event.state().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE); + if (reservedState != null && reservedState.version() == 0L) { + clusterService.removeListener(this); + metadataVersion.set(event.state().metadata().version()); + savedClusterState.countDown(); + } + } + }); + + return new Tuple<>(savedClusterState, metadataVersion); + } + + private Tuple cleanedClusterStateListener(String node) { + ClusterService clusterService = internalCluster().clusterService(node); + CountDownLatch savedClusterState = new CountDownLatch(1); + AtomicLong metadataVersion = new AtomicLong(-1); + clusterService.addListener(new ClusterStateListener() { + @Override + public void clusterChanged(ClusterChangedEvent event) { + ReservedStateMetadata reservedState = event.state().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE); + if (reservedState != null) { + ReservedStateHandlerMetadata handlerMetadata = reservedState.handlers().get(ReservedClusterSettingsAction.NAME); + if (handlerMetadata == null) { + fail("Should've found cluster settings in this metadata"); + } + if (handlerMetadata.keys().isEmpty()) { + clusterService.removeListener(this); + metadataVersion.set(event.state().metadata().version()); + savedClusterState.countDown(); + } + } + } + }); + + return new Tuple<>(savedClusterState, metadataVersion); + } + + public void testRestoreWithPersistedFileSettings() throws Exception { + try { + createRepository("test-repo", "fs"); + + logger.info("--> set some persistent cluster settings"); + assertAcked( + clusterAdmin().prepareUpdateSettings() + .setPersistentSettings( + Settings.builder() + 
.put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), TimeValue.timeValueSeconds(25)) + .build() + ) + ); + + ensureGreen(); + + String masterNode = internalCluster().getMasterName(); + + var savedClusterState = setupClusterStateListener(masterNode); + FileSettingsService fs = internalCluster().getInstance(FileSettingsService.class, masterNode); + + logger.info("--> write some file based settings, putting some reserved state"); + writeJSONFile(masterNode, testFileSettingsJSON); + final ClusterStateResponse savedStateResponse = assertClusterStateSaveOK(savedClusterState.v1(), savedClusterState.v2()); + assertThat( + savedStateResponse.getState().metadata().persistentSettings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), + equalTo("50mb") + ); + + logger.info("--> create full snapshot"); + createFullSnapshot("test-repo", "test-snap"); + assertThat(getSnapshot("test-repo", "test-snap").state(), equalTo(SnapshotState.SUCCESS)); + + assertAcked( + clusterAdmin().prepareUpdateSettings() + .setPersistentSettings( + Settings.builder() + .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), TimeValue.timeValueSeconds(55)) + .build() + ) + ); + + logger.info("--> restore global state from the snapshot"); + var removedReservedState = removedReservedClusterStateListener(masterNode); + var restoredReservedState = setupClusterStateListener(masterNode); + + clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap").setRestoreGlobalState(true).setWaitForCompletion(true).get(); + + ensureGreen(); + + // When the target cluster of a restore has an existing operator file, we don't un-reserve the reserved + // cluster state for file based settings, but instead we reset the version to 0 and 'touch' the operator file + // so that it gets re-processed. 
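In other words, the listeners registered above key off the reserved-state version: `removedReservedClusterStateListener` fires when the restore resets it to 0, and `setupClusterStateListener` fires once the re-processed operator file bumps it back to a non-zero value. A hedged sketch of reading that marker from a cluster state (the helper name is illustrative; the accessors are the ones used by the listeners above):

    import org.elasticsearch.cluster.ClusterState;
    import org.elasticsearch.cluster.metadata.ReservedStateMetadata;

    // Returns the version of the file-settings reserved state, or -1 if none is present.
    // A value of 0 means "reset by a restore and awaiting re-processing of the operator file".
    static long fileSettingsReservedVersion(ClusterState state) {
        ReservedStateMetadata reserved = state.metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE);
        return reserved == null ? -1L : reserved.version();
    }
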
+ logger.info("--> reserved state version will be reset to 0, because of snapshot restore"); + assertTrue(removedReservedState.v1().await(20, TimeUnit.SECONDS)); + + logger.info("--> reserved state would be restored"); + assertTrue(restoredReservedState.v1().await(20, TimeUnit.SECONDS)); + + final ClusterStateResponse clusterStateResponse = clusterAdmin().state( + new ClusterStateRequest().metadata(true).waitForMetadataVersion(restoredReservedState.v2().get()) + ).actionGet(); + + assertNotNull(clusterStateResponse.getState().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE)); + + final ClusterGetSettingsAction.Response getSettingsResponse = clusterAdmin().execute( + ClusterGetSettingsAction.INSTANCE, + new ClusterGetSettingsAction.Request() + ).actionGet(); + + assertThat( + getSettingsResponse.persistentSettings().get(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey()), + equalTo("25s") + ); + + // we need to remove the reserved state, so that clean-up can happen + var cleanupReservedState = cleanedClusterStateListener(masterNode); + + logger.info("--> clear the file based settings"); + writeJSONFile(masterNode, emptyFileSettingsJSON); + assertClusterStateSaveOK(cleanupReservedState.v1(), cleanupReservedState.v2()); + } finally { + // cleanup + assertAcked( + clusterAdmin().prepareUpdateSettings() + .setPersistentSettings( + Settings.builder() + .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), (String) null) + .put("indices.recovery.max_bytes_per_sec", (String) null) + .build() + ) + ); + } + } + +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java index 4755d5b2bfece..b91c481f8e641 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java @@ -18,6 +18,7 @@ import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; +import org.elasticsearch.search.aggregations.bucket.range.GeoDistanceAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.range.Range; import org.elasticsearch.search.aggregations.bucket.range.Range.Bucket; import org.elasticsearch.search.aggregations.bucket.terms.Terms; @@ -28,9 +29,11 @@ import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; +import java.util.function.Consumer; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.search.aggregations.AggregationBuilders.geoDistance; @@ -128,15 +131,18 @@ public void setupSuiteScopeCluster() throws Exception { } public void testSimple() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894)).field("location") - .unit(DistanceUnit.KILOMETERS) - .addUnboundedTo(500) - .addRange(500, 1000) - .addUnboundedFrom(1000) - ) - .get(); + List> ranges = new ArrayList<>(); + ranges.add(b -> b.addUnboundedTo(500)); + ranges.add(b -> b.addRange(500, 1000)); + ranges.add(b -> b.addUnboundedFrom(1000)); + // add ranges in any 
order + Collections.shuffle(ranges, random()); + GeoDistanceAggregationBuilder builder = geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894)).field("location") + .unit(DistanceUnit.KILOMETERS); + for (Consumer range : ranges) { + range.accept(builder); + } + SearchResponse response = client().prepareSearch("idx").addAggregation(builder).get(); assertSearchResponse(response); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/IntervalQueriesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/IntervalQueriesIT.java new file mode 100644 index 0000000000000..c2ccfe4ee9694 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/IntervalQueriesIT.java @@ -0,0 +1,104 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.query; + +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.Tokenizer; +import org.apache.lucene.analysis.core.KeywordTokenizer; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.index.analysis.AnalyzerProvider; +import org.elasticsearch.index.analysis.AnalyzerScope; +import org.elasticsearch.index.query.IntervalQueryBuilder; +import org.elasticsearch.index.query.IntervalsSourceProvider; +import org.elasticsearch.indices.analysis.AnalysisModule; +import org.elasticsearch.plugins.AnalysisPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.Map; + +import static java.util.Collections.singletonMap; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; + +public class IntervalQueriesIT extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(InternalSettingsPlugin.class, MockAnalysisPlugin.class); + } + + public void testEmptyIntervalsWithNestedMappings() throws InterruptedException { + assertAcked(prepareCreate("nested").setMapping(""" + { "_doc" : { + "properties" : { + "empty_text" : { "type" : "text", "analyzer" : "empty" }, + "text" : { "type" : "text" }, + "nested" : { "type" : "nested", "properties" : { "nt" : { "type" : "text" } } } + } + }} + """)); + + indexRandom( + true, + client().prepareIndex("nested").setId("1").setSource("text", "the quick brown fox jumps"), + client().prepareIndex("nested").setId("2").setSource("text", "quick brown"), + client().prepareIndex("nested").setId("3").setSource("text", "quick") + ); + + SearchResponse resp = client().prepareSearch("nested") + .setQuery( + new IntervalQueryBuilder("empty_text", new IntervalsSourceProvider.Match("an empty query", 0, true, null, null, null)) + ) + .get(); + assertEquals(0, resp.getFailedShards()); + } + + private static class EmptyAnalyzer extends Analyzer { + + @Override + protected TokenStreamComponents createComponents(String fieldName) { + Tokenizer source = new KeywordTokenizer(); + TokenStream sink = new TokenStream() { + @Override + public boolean incrementToken() throws IOException { + 
return false; + } + }; + return new TokenStreamComponents(source, sink); + } + } + + public static class MockAnalysisPlugin extends Plugin implements AnalysisPlugin { + + @Override + public Map>> getAnalyzers() { + return singletonMap("empty", (indexSettings, environment, name, settings) -> new AnalyzerProvider<>() { + @Override + public String name() { + return "empty"; + } + + @Override + public AnalyzerScope scope() { + return AnalyzerScope.GLOBAL; + } + + @Override + public Analyzer get() { + return new EmptyAnalyzer(); + } + }); + } + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/BlobStoreIncrementalityIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/BlobStoreIncrementalityIT.java index 4c278aeddb5bf..e3fdb92785503 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/BlobStoreIncrementalityIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/BlobStoreIncrementalityIT.java @@ -197,7 +197,7 @@ public void testRecordCorrectSegmentCountsWithBackgroundMerges() throws Exceptio // create a situation where we temporarily have a bunch of segments until the merges can catch up long id = 0; - final int rounds = scaledRandomIntBetween(3, 5); + final int rounds = scaledRandomIntBetween(5, 9); for (int i = 0; i < rounds; ++i) { final int numDocs = scaledRandomIntBetween(100, 1000); BulkRequestBuilder request = client().prepareBulk().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index b6cf3d43cf93b..2c4c7f07c41f1 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -103,6 +103,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_7_17_4 = new Version(7_17_04_99, org.apache.lucene.util.Version.LUCENE_8_11_1); public static final Version V_7_17_5 = new Version(7_17_05_99, org.apache.lucene.util.Version.LUCENE_8_11_1); public static final Version V_7_17_6 = new Version(7_17_06_99, org.apache.lucene.util.Version.LUCENE_8_11_1); + public static final Version V_7_17_7 = new Version(7_17_07_99, org.apache.lucene.util.Version.LUCENE_8_11_1); public static final Version V_8_0_0 = new Version(8_00_00_99, org.apache.lucene.util.Version.LUCENE_9_0_0); public static final Version V_8_0_1 = new Version(8_00_01_99, org.apache.lucene.util.Version.LUCENE_9_0_0); public static final Version V_8_1_0 = new Version(8_01_00_99, org.apache.lucene.util.Version.LUCENE_9_0_0); @@ -118,7 +119,10 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_8_3_2 = new Version(8_03_02_99, org.apache.lucene.util.Version.LUCENE_9_2_0); public static final Version V_8_3_3 = new Version(8_03_03_99, org.apache.lucene.util.Version.LUCENE_9_2_0); public static final Version V_8_4_0 = new Version(8_04_00_99, org.apache.lucene.util.Version.LUCENE_9_3_0); - public static final Version CURRENT = V_8_4_0; + public static final Version V_8_4_1 = new Version(8_04_01_99, org.apache.lucene.util.Version.LUCENE_9_3_0); + public static final Version V_8_4_2 = new Version(8_04_02_99, org.apache.lucene.util.Version.LUCENE_9_3_0); + public static final Version V_8_4_3 = new Version(8_04_03_99, org.apache.lucene.util.Version.LUCENE_9_3_0); + public static final Version CURRENT = V_8_4_3; private static final Map idToVersion; private static final Map 
stringToVersion; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java index e9473228ba1f4..75d512683e318 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java @@ -102,7 +102,16 @@ Set resolveVotingConfigExclusions(ClusterState currentSta } else { assert nodeNames.length > 0; Map existingNodes = allNodes.stream() - .collect(Collectors.toMap(DiscoveryNode::getName, Function.identity())); + .collect(Collectors.toMap(DiscoveryNode::getName, Function.identity(), (n1, n2) -> { + throw new IllegalArgumentException( + org.elasticsearch.core.Strings.format( + "node name [%s] is ambiguous, matching [%s] and [%s]; specify node ID instead", + n1.getName(), + n1.descriptionWithoutAttributes(), + n2.descriptionWithoutAttributes() + ) + ); + })); for (String nodeName : nodeNames) { if (existingNodes.containsKey(nodeName)) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/ClusterFormationInfoAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/ClusterFormationInfoAction.java index 66e3383bb6dd6..cde7aecd7ed21 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/ClusterFormationInfoAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/ClusterFormationInfoAction.java @@ -33,7 +33,7 @@ public class ClusterFormationInfoAction extends ActionType { public static final ClusterFormationInfoAction INSTANCE = new ClusterFormationInfoAction(); - public static final String NAME = "cluster:internal/formation/info"; + public static final String NAME = "internal:cluster/formation/info"; private ClusterFormationInfoAction() { super(NAME, ClusterFormationInfoAction.Response::new); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/CoordinationDiagnosticsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/CoordinationDiagnosticsAction.java index 913003c446d5a..6cde5080e6710 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/CoordinationDiagnosticsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/CoordinationDiagnosticsAction.java @@ -34,7 +34,7 @@ public class CoordinationDiagnosticsAction extends ActionType { public static final CoordinationDiagnosticsAction INSTANCE = new CoordinationDiagnosticsAction(); - public static final String NAME = "cluster:internal/coordination_diagnostics/info"; + public static final String NAME = "internal:cluster/coordination_diagnostics/info"; private CoordinationDiagnosticsAction() { super(NAME, CoordinationDiagnosticsAction.Response::new); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/MasterHistoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/MasterHistoryAction.java index 5a1fa58d4a852..d9e5a3e251629 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/MasterHistoryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/MasterHistoryAction.java @@ -34,7 +34,7 @@ public class 
MasterHistoryAction extends ActionType { public static final MasterHistoryAction INSTANCE = new MasterHistoryAction(); - public static final String NAME = "cluster:internal/master_history/get"; + public static final String NAME = "internal:cluster/master_history/get"; private MasterHistoryAction() { super(NAME, MasterHistoryAction.Response::new); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java index 9f2b9c597dc7e..d067b43ee95d1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java @@ -16,14 +16,18 @@ import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.core.CharArrays; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.transport.TransportRequest; import java.io.IOException; import java.util.Arrays; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; /** * Request for a reload secure settings action */ -public class NodesReloadSecureSettingsRequest extends BaseNodesRequest { +public class NodesReloadSecureSettingsRequest extends BaseNodesRequest implements Releasable { /** * The password is used to re-read and decrypt the contents @@ -70,12 +74,6 @@ public void setSecureStorePassword(SecureString secureStorePassword) { this.secureSettingsPassword = secureStorePassword; } - public void closePassword() { - if (this.secureSettingsPassword != null) { - this.secureSettingsPassword.close(); - } - } - boolean hasPassword() { return this.secureSettingsPassword != null && this.secureSettingsPassword.length() > 0; } @@ -94,4 +92,50 @@ public void writeTo(StreamOutput out) throws IOException { } } } + + // This field is intentionally not part of serialization + private final Set nodeRequests = ConcurrentHashMap.newKeySet(); + + NodeRequest newNodeRequest() { + final NodesReloadSecureSettingsRequest clone = new NodesReloadSecureSettingsRequest(nodesIds()); + if (hasPassword()) { + clone.setSecureStorePassword(getSecureSettingsPassword().clone()); + } + final NodeRequest nodeRequest = new NodeRequest(clone); + nodeRequests.add(nodeRequest); + return nodeRequest; + } + + @Override + public void close() { + if (this.secureSettingsPassword != null) { + this.secureSettingsPassword.close(); + } + nodeRequests.forEach(NodeRequest::close); + } + + public static class NodeRequest extends TransportRequest implements Releasable { + NodesReloadSecureSettingsRequest request; + + NodeRequest(StreamInput in) throws IOException { + super(in); + request = new NodesReloadSecureSettingsRequest(in); + } + + NodeRequest(NodesReloadSecureSettingsRequest request) { + this.request = request; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + request.writeTo(out); + } + + @Override + public void close() { + assert request.nodeRequests.isEmpty() : "potential circular reference"; + request.close(); + } + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java index e8c8fcfaac969..756e4312784aa 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java @@ -18,16 +18,13 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.KeyStoreWrapper; -import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.plugins.ReloadablePlugin; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportService; import java.io.IOException; @@ -37,7 +34,7 @@ public class TransportNodesReloadSecureSettingsAction extends TransportNodesAction< NodesReloadSecureSettingsRequest, NodesReloadSecureSettingsResponse, - TransportNodesReloadSecureSettingsAction.NodeRequest, + NodesReloadSecureSettingsRequest.NodeRequest, NodesReloadSecureSettingsResponse.NodeResponse> { private final Environment environment; @@ -59,7 +56,7 @@ public TransportNodesReloadSecureSettingsAction( transportService, actionFilters, NodesReloadSecureSettingsRequest::new, - NodeRequest::new, + NodesReloadSecureSettingsRequest.NodeRequest::new, ThreadPool.Names.GENERIC, NodesReloadSecureSettingsResponse.NodeResponse.class ); @@ -77,8 +74,8 @@ protected NodesReloadSecureSettingsResponse newResponse( } @Override - protected NodeRequest newNodeRequest(NodesReloadSecureSettingsRequest request) { - return new NodeRequest(request); + protected NodesReloadSecureSettingsRequest.NodeRequest newNodeRequest(NodesReloadSecureSettingsRequest request) { + return request.newNodeRequest(); } @Override @@ -93,7 +90,7 @@ protected void doExecute( ActionListener listener ) { if (request.hasPassword() && isNodeLocal(request) == false && isNodeTransportTLSEnabled() == false) { - request.closePassword(); + request.close(); listener.onFailure( new ElasticsearchException( "Secure settings cannot be updated cluster wide when TLS for the transport layer" @@ -101,23 +98,17 @@ protected void doExecute( ) ); } else { - super.doExecute(task, request, ActionListener.wrap(response -> { - request.closePassword(); - listener.onResponse(response); - }, e -> { - request.closePassword(); - listener.onFailure(e); - })); + super.doExecute(task, request, ActionListener.runBefore(listener, request::close)); } } @Override - protected NodesReloadSecureSettingsResponse.NodeResponse nodeOperation(NodeRequest nodeReloadRequest, Task task) { + protected NodesReloadSecureSettingsResponse.NodeResponse nodeOperation( + NodesReloadSecureSettingsRequest.NodeRequest nodeReloadRequest, + Task task + ) { final NodesReloadSecureSettingsRequest request = nodeReloadRequest.request; // We default to using an empty string as the keystore password so that we mimic pre 7.3 API behavior - final SecureString secureSettingsPassword = request.hasPassword() - ? 
request.getSecureSettingsPassword() - : new SecureString(new char[0]); try (KeyStoreWrapper keystore = KeyStoreWrapper.load(environment.configFile())) { // reread keystore from config file if (keystore == null) { @@ -127,7 +118,7 @@ protected NodesReloadSecureSettingsResponse.NodeResponse nodeOperation(NodeReque ); } // decrypt the keystore using the password from the request - keystore.decrypt(secureSettingsPassword.getChars()); + keystore.decrypt(request.hasPassword() ? request.getSecureSettingsPassword().getChars() : new char[0]); // add the keystore to the original node settings object final Settings settingsWithKeystore = Settings.builder().put(environment.settings(), false).setSecureSettings(keystore).build(); final List exceptions = new ArrayList<>(); @@ -145,27 +136,7 @@ protected NodesReloadSecureSettingsResponse.NodeResponse nodeOperation(NodeReque } catch (final Exception e) { return new NodesReloadSecureSettingsResponse.NodeResponse(clusterService.localNode(), e); } finally { - secureSettingsPassword.close(); - } - } - - public static class NodeRequest extends TransportRequest { - - NodesReloadSecureSettingsRequest request; - - public NodeRequest(StreamInput in) throws IOException { - super(in); - request = new NodesReloadSecureSettingsRequest(in); - } - - NodeRequest(NodesReloadSecureSettingsRequest request) { - this.request = request; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - request.writeTo(out); + request.close(); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java index bc11b7e06ff57..e9f49efd4cb1c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java @@ -554,6 +554,8 @@ public String toString() { + searchRouting + ",writeIndex=" + writeIndex + + ",isHidden=" + + isHidden + ",mustExist=" + mustExist + "]"; diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java index 5281b4e44211e..e7d6eca23498f 100644 --- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java @@ -379,7 +379,10 @@ public Task createTask(long id, String type, String action, TaskId parentTaskId, return new CancellableTask(id, type, action, "", parentTaskId, headers) { @Override public String getDescription() { - return requests.stream().map(SearchRequest::buildDescription).collect(Collectors.joining(action + "[", ",", "]")); + return "requests[" + + requests.size() + + "]: " + + requests.stream().map(SearchRequest::buildDescription).collect(Collectors.joining(" | ")); } }; } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 87bef61239445..6eb1b8adc7ad8 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -816,15 +816,21 @@ public final String buildDescription() { StringBuilder sb = new StringBuilder(); sb.append("indices["); Strings.arrayToDelimitedString(indices, ",", sb); - sb.append("], "); - 
sb.append("search_type[").append(searchType).append("], "); + sb.append("]"); + sb.append(", search_type[").append(searchType).append("]"); if (scroll != null) { - sb.append("scroll[").append(scroll.keepAlive()).append("], "); + sb.append(", scroll[").append(scroll.keepAlive()).append("]"); } if (source != null) { - sb.append("source[").append(source.toString(FORMAT_PARAMS)).append("]"); + sb.append(", source[").append(source.toString(FORMAT_PARAMS)).append("]"); } else { - sb.append("source[]"); + sb.append(", source[]"); + } + if (routing != null) { + sb.append(", routing[").append(routing).append("]"); + } + if (preference != null) { + sb.append(", preference[").append(preference).append("]"); } return sb.toString(); } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java index 57cd50eaf568f..fb1a3fa625da2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; @@ -41,11 +42,11 @@ import java.util.HashSet; import java.util.List; import java.util.Locale; +import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; import java.util.function.Consumer; @@ -83,17 +84,26 @@ public class CoordinationDiagnosticsService implements ClusterStateListener { private final int unacceptableIdentityChanges; /* - * This is a list of tasks that are periodically reaching out to other master eligible nodes to get their ClusterFormationStates for - * diagnosis. + * This is a Map of tasks that are periodically reaching out to other master eligible nodes to get their ClusterFormationStates for + * diagnosis. The key is the DisoveryNode for the master eligible node being polled, and the value is a Cancellable. * The field is accessed (reads/writes) from multiple threads, but the reference itself is only ever changed on the cluster change * event thread. */ - private volatile List clusterFormationInfoTasks = null; + // Non-private for testing + volatile Map clusterFormationInfoTasks = null; /* * This field holds the results of the tasks in the clusterFormationInfoTasks field above. The field is accessed (reads/writes) from * multiple threads, but the reference itself is only ever changed on the cluster change event thread. */ - private volatile ConcurrentMap clusterFormationResponses = null; + // Non-private for testing + volatile ConcurrentMap clusterFormationResponses = null; + + /** + * This is the amount of time that we wait before scheduling a remote request to gather diagnostic information. It is not + * user-configurable, but is non-final so that integration tests don't have to waste 10 seconds. 
+ */ + // Non-private for testing + TimeValue remoteRequestInitialDelay = new TimeValue(10, TimeUnit.SECONDS); private static final Logger logger = LogManager.getLogger(CoordinationDiagnosticsService.class); @@ -432,8 +442,16 @@ private boolean hasSeenMasterInHasMasterLookupTimeframe() { public void clusterChanged(ClusterChangedEvent event) { DiscoveryNode currentMaster = event.state().nodes().getMasterNode(); DiscoveryNode previousMaster = event.previousState().nodes().getMasterNode(); - if (currentMaster == null && previousMaster != null) { + if ((currentMaster == null && previousMaster != null) || (currentMaster != null && previousMaster == null)) { if (masterHistoryService.getLocalMasterHistory().hasMasterGoneNullAtLeastNTimes(unacceptableNullTransitions)) { + /* + * If the master node has been going to null repeatedly, we want to make a remote request to it to see what it thinks of + * master stability. We want to query the most recent master whether the current master has just transitioned to null or + * just transitioned from null to not null. The reason that we make the latter request is that sometimes when the elected + * master goes to null the most recent master is not responsive for the duration of the request timeout (for example if + * that node is in the middle of a long GC pause which would be both the reason for it not being master and the reason it + * does not respond quickly to transport requests). + */ DiscoveryNode master = masterHistoryService.getLocalMasterHistory().getMostRecentNonNullMaster(); /* * If the most recent master was this box, there is no point in making a transport request -- we already know what this @@ -446,9 +464,9 @@ public void clusterChanged(ClusterChangedEvent event) { } if (currentMaster == null && clusterService.localNode().isMasterNode()) { /* - * This begins polling all master-eligible nodes for cluster formation information. However there's a 10-second delay before it - * starts, so in the normal situation where during a master transition it flips from master1 -> null -> master2, it the - * polling tasks will be canceled before any requests are actually made. + * This begins polling all master-eligible nodes for cluster formation information. However there's a 10-second delay + * before it starts, so in the normal situation where during a master transition it flips from master1 -> null -> + * master2 the polling tasks will be canceled before any requests are actually made. */ beginPollingClusterFormationInfo(); } else { @@ -460,14 +478,18 @@ public void clusterChanged(ClusterChangedEvent event) { * This method begins polling all known master-eligible nodes for cluster formation information. After a 10-second initial delay, it * polls each node every 10 seconds until cancelPollingClusterFormationInfo() is called. 
*/ - private void beginPollingClusterFormationInfo() { + void beginPollingClusterFormationInfo() { assert ThreadPool.assertCurrentThreadPool(ClusterApplierService.CLUSTER_UPDATE_THREAD_NAME); cancelPollingClusterFormationInfo(); ConcurrentMap responses = new ConcurrentHashMap<>(); - List cancellables = new CopyOnWriteArrayList<>(); - beginPollingClusterFormationInfo(getMasterEligibleNodes(), responses::put, cancellables::add); - clusterFormationResponses = responses; + Map cancellables = new ConcurrentHashMap<>(); + /* + * Assignment of clusterFormationInfoTasks must be done before the call to beginPollingClusterFormationInfo because it is used + * asynchronously by rescheduleFetchConsumer, called from beginPollingClusterFormationInfo. + */ clusterFormationInfoTasks = cancellables; + clusterFormationResponses = responses; + beginPollingClusterFormationInfo(getMasterEligibleNodes(), responses::put, cancellables); } /** @@ -475,22 +497,31 @@ private void beginPollingClusterFormationInfo() { * repeats doing that until cancel() is called on all of the Cancellable that this method inserts into cancellables. This method * exists (rather than being just part of the beginPollingClusterFormationInfo() above) in order to facilitate unit testing. * @param nodeResponseConsumer A consumer for any results produced for a node by this method - * @param cancellableConsumer A consumer for any Cancellable tasks produced by this method + * @param cancellables The Map of Cancellables, one for each node being polled */ // Non-private for testing void beginPollingClusterFormationInfo( Collection masterEligibleNodes, BiConsumer nodeResponseConsumer, - Consumer cancellableConsumer + Map cancellables ) { masterEligibleNodes.forEach(masterEligibleNode -> { Consumer responseConsumer = result -> nodeResponseConsumer.accept(masterEligibleNode, result); - cancellableConsumer.accept( - fetchClusterFormationInfo( + try { + cancellables.put( masterEligibleNode, - responseConsumer.andThen(rescheduleFetchConsumer(masterEligibleNode, responseConsumer, cancellableConsumer)) - ) - ); + fetchClusterFormationInfo( + masterEligibleNode, + responseConsumer.andThen(rescheduleFetchConsumer(masterEligibleNode, responseConsumer, cancellables)) + ) + ); + } catch (EsRejectedExecutionException e) { + if (e.isExecutorShutdown()) { + logger.trace("Not rescheduling request for cluster coordination info because this node is being shutdown", e); + } else { + throw e; + } + } }); } @@ -499,30 +530,69 @@ void beginPollingClusterFormationInfo( * completed, adding the resulting Cancellable to cancellableConsumer. * @param masterEligibleNode The node being polled * @param responseConsumer The response consumer to be wrapped - * @param cancellableConsumer The list of Cancellables + * @param cancellables The Map of Cancellables, one for each node being polled * @return */ private Consumer rescheduleFetchConsumer( DiscoveryNode masterEligibleNode, Consumer responseConsumer, - Consumer cancellableConsumer + Map cancellables ) { return response -> { - cancellableConsumer.accept( - fetchClusterFormationInfo( - masterEligibleNode, - responseConsumer.andThen(rescheduleFetchConsumer(masterEligibleNode, responseConsumer, cancellableConsumer)) - ) - ); + /* + * If clusterFormationInfoTasks is null, that means that cancelPollingClusterFormationInfo() has been called, so we don't + * want to run anything new, and we want to cancel anything that might still be running in our cancellables just to be safe. 
+ */ + if (clusterFormationInfoTasks != null) { + /* + * If cancellables is not the same as clusterFormationInfoTasks, that means that the current polling track has been + * cancelled and a new polling track has been started. So we don't want to run anything new, and we want to cancel + * anything that might still be running in our cancellables just to be safe. Note that it is possible for + * clusterFormationInfoTasks to be null at this point (since it is assigned in a different thread), so it is important + * that we don't call equals on it. + */ + if (cancellables.equals(clusterFormationInfoTasks)) { + /* + * As mentioned in the comment in cancelPollingClusterFormationInfo(), there is a slim possibility here that we will + * add a task here for a poll that has already been cancelled. But when it completes and runs rescheduleFetchConsumer() + * we will then see that clusterFormationInfoTasks does not equal cancellables, so it will not be run again. + */ + try { + cancellables.put( + masterEligibleNode, + fetchClusterFormationInfo( + masterEligibleNode, + responseConsumer.andThen(rescheduleFetchConsumer(masterEligibleNode, responseConsumer, cancellables)) + ) + ); + } catch (EsRejectedExecutionException e) { + if (e.isExecutorShutdown()) { + logger.trace("Not rescheduling request for cluster coordination info because this node is being shutdown", e); + } else { + throw e; + } + } + } else { + cancellables.values().forEach(Scheduler.Cancellable::cancel); + } + } else { + cancellables.values().forEach(Scheduler.Cancellable::cancel); + } }; } - private void cancelPollingClusterFormationInfo() { + void cancelPollingClusterFormationInfo() { assert ThreadPool.assertCurrentThreadPool(ClusterApplierService.CLUSTER_UPDATE_THREAD_NAME); - if (clusterFormationResponses != null) { - clusterFormationInfoTasks.forEach(Scheduler.Cancellable::cancel); - clusterFormationResponses = null; + if (clusterFormationInfoTasks != null) { + /* + * There is a slight risk here that a new Cancellable is added to clusterFormationInfoTasks after we begin iterating in the next + * line. We are calling this an acceptable risk because it will result in an un-cancelled un-cancellable task, but it will not + * reschedule itself so it will not be around long. It is possible that cancel() will be called on a Cancellable concurrently + * by multiple threads, but that will not cause any problems. + */ + clusterFormationInfoTasks.values().forEach(Scheduler.Cancellable::cancel); clusterFormationInfoTasks = null; + clusterFormationResponses = null; } } @@ -532,6 +602,7 @@ private void cancelPollingClusterFormationInfo() { * @param node The node to poll for cluster formation information * @param responseConsumer The consumer of the cluster formation info for the node, or the exception encountered while contacting it * @return A Cancellable for the task that is scheduled to fetch cluster formation information + * @throws EsRejectedExecutionException If the task cannot be scheduled, possibly because the node is shutting down. 
*/ private Scheduler.Cancellable fetchClusterFormationInfo( DiscoveryNode node, @@ -585,7 +656,7 @@ private Scheduler.Cancellable fetchClusterFormationInfo( connectionListener ); } - }, new TimeValue(10, TimeUnit.SECONDS), ThreadPool.Names.SAME); + }, remoteRequestInitialDelay, ThreadPool.Names.SAME); } // Non-private for testing diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 957b3cf37c03d..548ac34e4c77d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -388,6 +388,7 @@ private void handleApplyCommit(ApplyCommitRequest applyCommitRequest, ActionList coordinationState.get().handleCommit(applyCommitRequest); final ClusterState committedState = hideStateIfNotRecovered(coordinationState.get().getLastAcceptedState()); applierState = mode == Mode.CANDIDATE ? clusterStateWithNoMasterBlock(committedState) : committedState; + updateSingleNodeClusterChecker(); // in case nodes increase/decrease, possibly update the single-node checker if (applyCommitRequest.getSourceNode().equals(getLocalNode())) { // master node applies the committed state at the end of the publication process, not here. applyListener.onResponse(null); @@ -755,8 +756,19 @@ private void processJoinRequest(JoinRequest joinRequest, ActionListener jo } } - private void cancelSingleNodeClusterChecker() { + private void updateSingleNodeClusterChecker() { assert Thread.holdsLock(mutex) : "Coordinator mutex not held"; + + if (mode == Mode.LEADER && applierState.nodes().size() == 1) { + if (singleNodeClusterChecker == null) { + // Make a single-node checker if none exists + singleNodeClusterChecker = transportService.getThreadPool() + .scheduleWithFixedDelay(() -> { checkSingleNodeCluster(); }, this.singleNodeClusterSeedHostsCheckInterval, Names.SAME); + } + return; + } + + // In case of a multi-node cluster, there is no need for the single-node checker so cancel it if (singleNodeClusterChecker != null) { singleNodeClusterChecker.cancel(); singleNodeClusterChecker = null; @@ -764,7 +776,7 @@ private void cancelSingleNodeClusterChecker() { } private void checkSingleNodeCluster() { - if (applierState.nodes().size() > 1) { + if (mode != Mode.LEADER || applierState.nodes().size() > 1) { return; } @@ -796,7 +808,6 @@ void becomeCandidate(String method) { mode, lastKnownLeader ); - cancelSingleNodeClusterChecker(); if (mode != Mode.CANDIDATE) { final Mode prevMode = mode; @@ -825,6 +836,7 @@ void becomeCandidate(String method) { } } + updateSingleNodeClusterChecker(); preVoteCollector.update(getPreVoteResponse(), null); } @@ -853,12 +865,7 @@ private void becomeLeader() { assert leaderChecker.leader() == null : leaderChecker.leader(); followersChecker.updateFastResponseState(getCurrentTerm(), mode); - if (applierState.nodes().size() > 1) { - cancelSingleNodeClusterChecker(); - } else if (singleNodeClusterChecker == null) { - singleNodeClusterChecker = transportService.getThreadPool() - .scheduleWithFixedDelay(() -> { checkSingleNodeCluster(); }, this.singleNodeClusterSeedHostsCheckInterval, Names.SAME); - } + updateSingleNodeClusterChecker(); } void becomeFollower(String method, DiscoveryNode leaderNode) { @@ -878,7 +885,6 @@ void becomeFollower(String method, DiscoveryNode leaderNode) { lastKnownLeader ); } - cancelSingleNodeClusterChecker(); final boolean restartLeaderChecker = (mode == 
Mode.FOLLOWER && Optional.of(leaderNode).equals(lastKnownLeader)) == false; @@ -889,6 +895,7 @@ void becomeFollower(String method, DiscoveryNode leaderNode) { leaderChecker.setCurrentNodes(DiscoveryNodes.EMPTY_NODES); } + updateSingleNodeClusterChecker(); lastKnownLeader = Optional.of(leaderNode); peerFinder.deactivate(leaderNode); clusterFormationFailureHelper.stop(); @@ -1040,6 +1047,8 @@ public void invariant() { assert lagDetector.getTrackedNodes().contains(getLocalNode()) == false : lagDetector.getTrackedNodes(); assert followersChecker.getKnownFollowers().equals(lagDetector.getTrackedNodes()) : followersChecker.getKnownFollowers() + " vs " + lagDetector.getTrackedNodes(); + assert singleNodeClusterChecker == null || (mode == Mode.LEADER && applierState.nodes().size() == 1) + : "Single node checker must exist iff there is a single-node cluster"; if (mode == Mode.LEADER) { final boolean becomingMaster = getStateForMasterService().term() != getCurrentTerm(); @@ -1085,10 +1094,6 @@ assert getLocalNode().equals(applierState.nodes().getMasterNode()) : coordinationState.get().getLastAcceptedConfiguration() + " != " + coordinationState.get().getLastCommittedConfiguration(); - - if (coordinationState.get().getLastAcceptedState().nodes().size() == 1) { - assert singleNodeClusterChecker != null; - } } else if (mode == Mode.FOLLOWER) { assert coordinationState.get().electionWon() == false : getLocalNode() + " is FOLLOWER so electionWon() should be false"; assert lastKnownLeader.isPresent() && (lastKnownLeader.get().equals(getLocalNode()) == false); @@ -1106,7 +1111,6 @@ assert getLocalNode().equals(applierState.nodes().getMasterNode()) assert currentPublication.map(Publication::isCommitted).orElse(true); assert preVoteCollector.getLeader().equals(lastKnownLeader.get()) : preVoteCollector; assert clusterFormationFailureHelper.isRunning() == false; - assert singleNodeClusterChecker == null; } else { assert mode == Mode.CANDIDATE; assert joinAccumulator instanceof JoinHelper.CandidateJoinAccumulator; diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/MasterHistoryService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/MasterHistoryService.java index f16b5bf9135f3..657c3d73216fe 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/MasterHistoryService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/MasterHistoryService.java @@ -114,8 +114,8 @@ public List getRemoteMasterHistory() throws Exception { * @param node The node whose view of the master history we want to fetch */ public void refreshRemoteMasterHistory(DiscoveryNode node) { - Version minSupportedVersion = Version.V_8_3_0; - if (node.getVersion().onOrAfter(minSupportedVersion)) { // This was introduced in 8.3.0 + Version minSupportedVersion = Version.V_8_4_0; + if (node.getVersion().before(minSupportedVersion)) { // This was introduced in 8.3.0 (and the action name changed in 8.4.0) logger.trace( "Cannot get master history for {} because it is at version {} and {} is required", node, diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifier.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifier.java index 39a2754a17ecb..7c03e97d58ba1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifier.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifier.java @@ -89,7 +89,7 @@ public IndexMetadata verifyIndexMetadata(IndexMetadata 
indexMetadata, Version mi newMetadata = removeTierFiltering(newMetadata); // Next we have to run this otherwise if we try to create IndexSettings // with broken settings it would fail in checkMappingsCompatibility - newMetadata = archiveBrokenIndexSettings(newMetadata); + newMetadata = archiveOrDeleteBrokenIndexSettings(newMetadata); checkMappingsCompatibility(newMetadata); return newMetadata; } @@ -205,27 +205,54 @@ public Set> entrySet() { /** * Identify invalid or unknown index settings and archive them. This leniency allows Elasticsearch to load * indices even if they contain old settings that are no longer valid. + * + * When we find an invalid setting on a system index, we simply remove it instead of archiving. System indices + * are managed by Elasticsearch and manual modification of settings is limited and sometimes impossible. */ - IndexMetadata archiveBrokenIndexSettings(IndexMetadata indexMetadata) { + IndexMetadata archiveOrDeleteBrokenIndexSettings(IndexMetadata indexMetadata) { final Settings settings = indexMetadata.getSettings(); - final Settings newSettings = indexScopedSettings.archiveUnknownOrInvalidSettings( - settings, - e -> logger.warn( - "{} ignoring unknown index setting: [{}] with value [{}]; archiving", - indexMetadata.getIndex(), - e.getKey(), - e.getValue() - ), - (e, ex) -> logger.warn( - () -> format( - "%s ignoring invalid index setting: [%s] with value [%s]; archiving", + final Settings newSettings; + + if (indexMetadata.isSystem()) { + newSettings = indexScopedSettings.deleteUnknownOrInvalidSettings( + settings, + e -> logger.warn( + "{} deleting unknown system index setting: [{}] with value [{}]", indexMetadata.getIndex(), e.getKey(), e.getValue() ), - ex - ) - ); + (e, ex) -> logger.warn( + () -> format( + "%s deleting invalid system index setting: [%s] with value [%s]", + indexMetadata.getIndex(), + e.getKey(), + e.getValue() + ), + ex + ) + ); + } else { + newSettings = indexScopedSettings.archiveUnknownOrInvalidSettings( + settings, + e -> logger.warn( + "{} ignoring unknown index setting: [{}] with value [{}]; archiving", + indexMetadata.getIndex(), + e.getKey(), + e.getValue() + ), + (e, ex) -> logger.warn( + () -> format( + "%s ignoring invalid index setting: [%s] with value [%s]; archiving", + indexMetadata.getIndex(), + e.getKey(), + e.getValue() + ), + ex + ) + ); + } + if (newSettings != settings) { return IndexMetadata.builder(indexMetadata).settings(newSettings).build(); } else { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java index 506581c7ad5cf..73cac97e0ce4c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java @@ -1778,7 +1778,7 @@ public Builder put(Map reservedStateMetadata) { /** * Adds a {@link ReservedStateMetadata} for a given namespace to the metadata builder - * @param metadata an {@link ReservedStateMetadata} + * @param metadata a {@link ReservedStateMetadata} * @return {@link Builder} */ public Builder put(ReservedStateMetadata metadata) { @@ -1786,6 +1786,16 @@ public Builder put(ReservedStateMetadata metadata) { return this; } + /** + * Removes a {@link ReservedStateMetadata} for a given namespace + * @param metadata a {@link ReservedStateMetadata} + * @return {@link Builder} + */ + public Builder removeReservedState(ReservedStateMetadata metadata) { + reservedStateMetadata.remove(metadata.namespace()); + return this; 
+ } + public Builder indexGraveyard(final IndexGraveyard indexGraveyard) { putCustom(IndexGraveyard.TYPE, indexGraveyard); return this; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ReservedStateMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ReservedStateMetadata.java index e738c26fe332c..d76297ba5b858 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ReservedStateMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ReservedStateMetadata.java @@ -214,6 +214,18 @@ public Builder(String namespace) { this.errorMetadata = null; } + /** + * Creates an reserved state metadata builder + * + * @param metadata the previous metadata + */ + public Builder(ReservedStateMetadata metadata) { + this(metadata.namespace); + this.version = metadata.version; + this.handlers = new HashMap<>(metadata.handlers); + this.errorMetadata = metadata.errorMetadata; + } + /** * Creates an reserved state metadata builder * diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ShutdownShardMigrationStatus.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ShutdownShardMigrationStatus.java index d49d02f582a29..dbde71aeef67c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ShutdownShardMigrationStatus.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ShutdownShardMigrationStatus.java @@ -24,6 +24,8 @@ public class ShutdownShardMigrationStatus implements Writeable, ToXContentObject { private static final Version ALLOCATION_DECISION_ADDED_VERSION = Version.V_7_16_0; + public static final String NODE_ALLOCATION_DECISION_KEY = "node_allocation_decision"; + private final SingleNodeShutdownMetadata.Status status; private final long shardsRemaining; @Nullable @@ -83,7 +85,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("explanation", explanation); } if (Objects.nonNull(allocationDecision)) { - builder.startObject("node_allocation_decision"); + builder.startObject(NODE_ALLOCATION_DECISION_KEY); { allocationDecision.toXContent(builder, params); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorService.java index 7fbd99aaf0073..94597db7d88f3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorService.java @@ -147,6 +147,16 @@ public HealthIndicatorResult calculate(boolean explain) { DIAGNOSE_SHARDS_ACTION_GUIDE ); + public static final String FIX_DELAYED_SHARDS_GUIDE = "http://ela.st/fix-delayed-shard-allocation"; + public static final Diagnosis.Definition DIAGNOSIS_WAIT_FOR_OR_FIX_DELAYED_SHARDS = new Diagnosis.Definition( + "delayed_shard_allocations", + "Elasticsearch is not allocating some shards because they are marked for delayed allocation. Shards that have become " + + "unavailable are usually marked for delayed allocation because it is more efficient to wait and see if the shards return " + + "on their own than to recover the shard immediately.", + "Elasticsearch will reallocate the shards when the delay has elapsed. 
No action is required by the user.", + FIX_DELAYED_SHARDS_GUIDE + ); + public static final String ENABLE_INDEX_ALLOCATION_GUIDE = "http://ela.st/fix-index-allocation"; public static final Diagnosis.Definition ACTION_ENABLE_INDEX_ROUTING_ALLOCATION = new Diagnosis.Definition( "enable_index_allocations", @@ -413,10 +423,18 @@ List diagnoseUnassignedShardRouting(ShardRouting shardRout actions.add(ACTION_RESTORE_FROM_SNAPSHOT); } break; + case NO_ATTEMPT: + if (shardRouting.unassignedInfo().isDelayed()) { + actions.add(DIAGNOSIS_WAIT_FOR_OR_FIX_DELAYED_SHARDS); + } else { + actions.addAll(explainAllocationsAndDiagnoseDeciders(shardRouting, state)); + } + break; case DECIDERS_NO: actions.addAll(explainAllocationsAndDiagnoseDeciders(shardRouting, state)); break; - default: + case DELAYED_ALLOCATION: + actions.add(DIAGNOSIS_WAIT_FOR_OR_FIX_DELAYED_SHARDS); break; } if (actions.isEmpty()) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java index b044edb35e739..e039eec612f94 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java @@ -61,7 +61,7 @@ private static Decision decisionWithFailures( } private static Decision debugDecision(Decision decision, UnassignedInfo unassignedInfo, int numFailedAllocations, int maxRetry) { - if (decision.type() == Decision.Type.YES) { + if (decision.type() == Decision.Type.NO) { return Decision.single( Decision.Type.NO, NAME, diff --git a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java index 6319c949e6b6d..4631641ecb119 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java @@ -956,6 +956,53 @@ public Settings archiveUnknownOrInvalidSettings( } } + /** + * Deletes invalid or unknown settings. Any setting that is not recognized or fails validation + * will be deleted. This behaviour is desired when dealing with unknown index settings on + * system indices. 
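
Editor's note on the hunk above: `archiveOrDeleteBrokenIndexSettings` now forks on `IndexMetadata#isSystem()`, deleting unrecognised settings on system indices while continuing to archive them on regular indices. The stand-alone sketch below (hypothetical class, keys and plain maps instead of `Settings`) only illustrates that archive-versus-delete split; it is not code from this change:

```java
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

// Sketch of archive-vs-delete handling for unrecognised settings keys.
// Known keys are kept; unknown keys are either dropped (system indices) or
// kept under an "archived." style prefix (regular indices) so nothing is lost silently.
public class BrokenSettingsSketch {
    private static final Set<String> KNOWN_KEYS = Set.of("index.number_of_shards", "index.number_of_replicas");

    static Map<String, String> archiveOrDelete(Map<String, String> settings, boolean systemIndex) {
        Map<String, String> result = new HashMap<>();
        for (Map.Entry<String, String> entry : settings.entrySet()) {
            if (KNOWN_KEYS.contains(entry.getKey())) {
                result.put(entry.getKey(), entry.getValue());
            } else if (systemIndex == false) {
                result.put("archived." + entry.getKey(), entry.getValue()); // keep it, but out of the way
            } // system index: drop the unknown key entirely
        }
        return result;
    }

    public static void main(String[] args) {
        Map<String, String> broken = Map.of("index.number_of_shards", "1", "index.bogus", "true");
        System.out.println(archiveOrDelete(broken, false)); // keeps index.bogus under archived.* (map order may vary)
        System.out.println(archiveOrDelete(broken, true));  // only index.number_of_shards survives
    }
}
```

The real implementation additionally validates each recognised key via `Setting#get` and routes validation failures through the same delete/archive branches, as the `deleteUnknownOrInvalidSettings` body in this diff shows.
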
+ * + * @param settings the {@link Settings} instance to scan for unknown or invalid settings + * @param unknownConsumer callback on unknown settings (consumer receives unknown key and its + * associated value) + * @param invalidConsumer callback on invalid settings (consumer receives invalid key, its + * associated value and an exception) + * @return a {@link Settings} instance with the unknown or invalid settings removed + */ + public Settings deleteUnknownOrInvalidSettings( + final Settings settings, + final Consumer> unknownConsumer, + final BiConsumer, IllegalArgumentException> invalidConsumer + ) { + Settings.Builder builder = Settings.builder(); + boolean changed = false; + for (String key : settings.keySet()) { + try { + Setting setting = get(key); + if (setting != null) { + // will throw IllegalArgumentException on invalid setting + setting.get(settings); + builder.copy(key, settings); + } else { + if (isPrivateSetting(key)) { + // will throw IllegalArgumentException on invalid setting + builder.copy(key, settings); + } else { + changed = true; + unknownConsumer.accept(new Entry(key, settings)); + } + } + } catch (IllegalArgumentException ex) { + changed = true; + invalidConsumer.accept(new Entry(key, settings), ex); + } + } + if (changed) { + return builder.build(); + } else { + return settings; + } + } + private record Entry(String key, Settings settings) implements Map.Entry { @Override diff --git a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java index 17cd3bfab4954..dc73dc77c71af 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java @@ -574,7 +574,7 @@ public class DateFormatters { /* * Returns a basic formatter for a full date as four digit weekyear, two - * digit week of weekyear, and one digit day of week (xxxx'W'wwe). + * digit week of weekyear, and one digit day of week (YYYY'W'wwe). */ private static final DateFormatter STRICT_BASIC_WEEK_DATE = new JavaDateFormatter( "strict_basic_week_date", @@ -584,7 +584,7 @@ public class DateFormatters { /* * Returns a basic formatter that combines a basic weekyear date and time - * without millis, separated by a 'T' (xxxx'W'wwe'T'HHmmssX). + * without millis, separated by a 'T' (YYYY'W'wwe'T'HHmmssX). */ private static final DateFormatter STRICT_BASIC_WEEK_DATE_TIME_NO_MILLIS = new JavaDateFormatter( "strict_basic_week_date_time_no_millis", @@ -616,7 +616,7 @@ public class DateFormatters { /* * Returns a basic formatter that combines a basic weekyear date and time, - * separated by a 'T' (xxxx'W'wwe'T'HHmmss.SSSX). + * separated by a 'T' (YYYY'W'wwe'T'HHmmss.SSSX). */ private static final DateFormatter STRICT_BASIC_WEEK_DATE_TIME = new JavaDateFormatter( "strict_basic_week_date_time", @@ -1080,13 +1080,13 @@ public class DateFormatters { /* * Returns a formatter for a full date as four digit weekyear, two digit - * week of weekyear, and one digit day of week (xxxx-'W'ww-e). + * week of weekyear, and one digit day of week (YYYY-'W'ww-e). */ private static final DateFormatter STRICT_WEEK_DATE = new JavaDateFormatter("strict_week_date", ISO_WEEK_DATE); /* * Returns a formatter that combines a full weekyear date and time without millis, - * separated by a 'T' (xxxx-'W'ww-e'T'HH:mm:ssZZ). + * separated by a 'T' (YYYY-'W'ww-e'T'HH:mm:ssZZ). 
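
A quick aside on the comment-only changes in this file: the examples previously used Joda-Time's weekyear letter (`xxxx`), while these formatters are now described in `java.time` terms, where the week-based year is `Y`. The snippet below is an illustration only (not part of the change) of why the week-based year can differ from the calendar year:

```java
import java.time.LocalDate;
import java.time.format.DateTimeFormatter;
import java.util.Locale;

public class WeekYearDemo {
    public static void main(String[] args) {
        // 'Y' = week-based year, 'w' = week of week-based year, 'e' = localized day-of-week,
        // matching the YYYY-'W'ww-e examples in the comments above.
        DateTimeFormatter weekDate = DateTimeFormatter.ofPattern("YYYY-'W'ww-e", Locale.ROOT);
        DateTimeFormatter calendarDate = DateTimeFormatter.ofPattern("uuuu-MM-dd", Locale.ROOT);

        LocalDate date = LocalDate.of(2019, 12, 30); // a Monday that already belongs to week 1 of 2020
        System.out.println(calendarDate.format(date)); // 2019-12-30
        System.out.println(weekDate.format(date));     // something like 2020-W01-1, depending on the locale's week rules
    }
}
```
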
*/ private static final DateFormatter STRICT_WEEK_DATE_TIME_NO_MILLIS = new JavaDateFormatter( "strict_week_date_time_no_millis", @@ -1109,7 +1109,7 @@ public class DateFormatters { /* * Returns a formatter that combines a full weekyear date and time, - * separated by a 'T' (xxxx-'W'ww-e'T'HH:mm:ss.SSSZZ). + * separated by a 'T' (YYYY-'W'ww-e'T'HH:mm:ss.SSSZZ). */ private static final DateFormatter STRICT_WEEK_DATE_TIME = new JavaDateFormatter( "strict_week_date_time", @@ -1153,13 +1153,13 @@ public class DateFormatters { /* * Returns a formatter for a four digit weekyear and two digit week of - * weekyear. (xxxx-'W'ww) + * weekyear. (YYYY-'W'ww) */ private static final DateFormatter STRICT_WEEKYEAR_WEEK = new JavaDateFormatter("strict_weekyear_week", STRICT_WEEKYEAR_WEEK_FORMATTER); /* * Returns a formatter for a four digit weekyear, two digit week of - * weekyear, and one digit day of week. (xxxx-'W'ww-e) + * weekyear, and one digit day of week. (YYYY-'W'ww-e) */ private static final DateFormatter STRICT_WEEKYEAR_WEEK_DAY = new JavaDateFormatter( "strict_weekyear_week_day", @@ -1693,7 +1693,7 @@ public class DateFormatters { /* * Returns a formatter that combines a full weekyear date and time, - * separated by a 'T' (xxxx-'W'ww-e'T'HH:mm:ss.SSSZZ). + * separated by a 'T' (YYYY-'W'ww-e'T'HH:mm:ss.SSSZZ). */ private static final DateFormatter WEEK_DATE_TIME = new JavaDateFormatter( "week_date_time", @@ -1718,7 +1718,7 @@ public class DateFormatters { /* * Returns a formatter that combines a full weekyear date and time, - * separated by a 'T' (xxxx-'W'ww-e'T'HH:mm:ssZZ). + * separated by a 'T' (YYYY-'W'ww-e'T'HH:mm:ssZZ). */ private static final DateFormatter WEEK_DATE_TIME_NO_MILLIS = new JavaDateFormatter( "week_date_time_no_millis", @@ -1741,7 +1741,7 @@ public class DateFormatters { /* * Returns a basic formatter that combines a basic weekyear date and time, - * separated by a 'T' (xxxx'W'wwe'T'HHmmss.SSSX). + * separated by a 'T' (YYYY'W'wwe'T'HHmmss.SSSX). */ private static final DateFormatter BASIC_WEEK_DATE_TIME = new JavaDateFormatter( "basic_week_date_time", @@ -1763,7 +1763,7 @@ public class DateFormatters { /* * Returns a basic formatter that combines a basic weekyear date and time, - * separated by a 'T' (xxxx'W'wwe'T'HHmmssX). + * separated by a 'T' (YYYY'W'wwe'T'HHmmssX). */ private static final DateFormatter BASIC_WEEK_DATE_TIME_NO_MILLIS = new JavaDateFormatter( "basic_week_date_time_no_millis", @@ -1906,13 +1906,13 @@ public class DateFormatters { /* * Returns a formatter for a full date as four digit weekyear, two digit - * week of weekyear, and one digit day of week (xxxx-'W'ww-e). + * week of weekyear, and one digit day of week (YYYY-'W'ww-e). */ private static final DateFormatter WEEK_DATE = new JavaDateFormatter("week_date", ISO_WEEK_DATE, WEEK_DATE_FORMATTER); /* * Returns a formatter for a four digit weekyear and two digit week of - * weekyear. (xxxx-'W'ww) + * weekyear. (YYYY-'W'ww) */ private static final DateFormatter WEEKYEAR_WEEK = new JavaDateFormatter( "weekyear_week", @@ -1926,7 +1926,7 @@ public class DateFormatters { /* * Returns a formatter for a four digit weekyear, two digit week of - * weekyear, and one digit day of week. (xxxx-'W'ww-e) + * weekyear, and one digit day of week. 
(YYYY-'W'ww-e) */ private static final DateFormatter WEEKYEAR_WEEK_DAY = new JavaDateFormatter( "weekyear_week_day", diff --git a/server/src/main/java/org/elasticsearch/common/time/EpochTime.java b/server/src/main/java/org/elasticsearch/common/time/EpochTime.java index 2bb3cde3ab9ef..d9df109bda0cd 100644 --- a/server/src/main/java/org/elasticsearch/common/time/EpochTime.java +++ b/server/src/main/java/org/elasticsearch/common/time/EpochTime.java @@ -252,7 +252,7 @@ public long getFrom(TemporalAccessor temporal) { static final DateFormatter SECONDS_FORMATTER = new JavaDateFormatter( "epoch_second", SECONDS_FORMATTER1, - builder -> builder.parseDefaulting(ChronoField.NANO_OF_SECOND, 999_999_999L), + (builder, parser) -> builder.parseDefaulting(ChronoField.NANO_OF_SECOND, 999_999_999L), SECONDS_FORMATTER1, SECONDS_FORMATTER2 ); @@ -260,7 +260,7 @@ public long getFrom(TemporalAccessor temporal) { static final DateFormatter MILLIS_FORMATTER = new JavaDateFormatter( "epoch_millis", MILLISECONDS_FORMATTER1, - builder -> builder.parseDefaulting(EpochTime.NANOS_OF_MILLI, 999_999L), + (builder, parser) -> builder.parseDefaulting(EpochTime.NANOS_OF_MILLI, 999_999L), MILLISECONDS_FORMATTER1, MILLISECONDS_FORMATTER2 ); diff --git a/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java b/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java index 6a84a7f215f16..f83b6a66da400 100644 --- a/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java +++ b/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java @@ -22,10 +22,44 @@ import java.util.List; import java.util.Locale; import java.util.Objects; -import java.util.function.Consumer; +import java.util.function.BiConsumer; import java.util.function.UnaryOperator; class JavaDateFormatter implements DateFormatter { + /** + * A default consumer that allows to round up fields (used for range searches, optional fields missing) + * it relies on toString implementation of DateTimeFormatter and ChronoField. 
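
The `DEFAULT_ROUND_UP` consumer documented above leans on `DateTimeFormatter#toString()` to discover which fields a pattern actually contains, so 12-hour patterns get their defaults on `CLOCK_HOUR_OF_AMPM` rather than a conflicting `HOUR_OF_DAY`. A minimal stand-alone sketch of that trick in plain `java.time` (the pattern and class name are illustrative, not taken from this change):

```java
import java.time.LocalTime;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeFormatterBuilder;
import java.time.temporal.ChronoField;
import java.util.Locale;

public class RoundUpDefaultsSketch {
    public static void main(String[] args) {
        // A 12-hour clock pattern: parsing supplies CLOCK_HOUR_OF_AMPM and AMPM_OF_DAY, not HOUR_OF_DAY.
        DateTimeFormatter parser = DateTimeFormatter.ofPattern("h:mm a", Locale.ROOT);

        // ChronoField.CLOCK_HOUR_OF_AMPM.toString() is "ClockHourOfAmPm", and the parser's toString()
        // lists its fields (e.g. "Value(ClockHourOfAmPm)..."), so a contains() check tells us which
        // hour field to leave alone and which fields are safe to default.
        DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder().append(parser);
        if (parser.toString().contains(ChronoField.CLOCK_HOUR_OF_AMPM.toString()) == false) {
            builder.parseDefaulting(ChronoField.HOUR_OF_DAY, 23L); // only default the 24h hour when the pattern uses it
        }
        builder.parseDefaulting(ChronoField.SECOND_OF_MINUTE, 59L);
        DateTimeFormatter roundUp = builder.toFormatter(Locale.ROOT);

        // The hour comes from the input; the missing seconds are rounded up to 59.
        System.out.println(LocalTime.parse("3:05 PM", roundUp)); // 15:05:59
    }
}
```

Defaulting `HOUR_OF_DAY` unconditionally would make the resolver see two contradictory hour values for AM/PM patterns, which is exactly the conflict this change avoids.
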
+ * For instance for pattern + * the parser would have a toString() + * + * Value(MonthOfYear,2)'/'Value(DayOfMonth,2)'/'Value(YearOfEra,4,19,EXCEEDS_PAD)' + * 'Value(ClockHourOfAmPm,2)':'Value(MinuteOfHour,2)' 'Text(AmPmOfDay,SHORT) + * + * and ChronoField.CLOCK_HOUR_OF_AMPM would have toString() ClockHourOfAmPm + * this allows the rounding logic to default CLOCK_HOUR_OF_AMPM field instead of HOUR_OF_DAY + * without this logic, the rounding would result in a conflict as HOUR_OF_DAY would be missing, but CLOCK_HOUR_OF_AMPM would be provided + */ + private static final BiConsumer DEFAULT_ROUND_UP = (builder, parser) -> { + String parserAsString = parser.toString(); + if (parserAsString.contains(ChronoField.MONTH_OF_YEAR.toString())) { + builder.parseDefaulting(ChronoField.MONTH_OF_YEAR, 1L); + } + if (parserAsString.contains(ChronoField.DAY_OF_MONTH.toString())) { + builder.parseDefaulting(ChronoField.DAY_OF_MONTH, 1L); + } + if (parserAsString.contains(ChronoField.CLOCK_HOUR_OF_AMPM.toString())) { + builder.parseDefaulting(ChronoField.CLOCK_HOUR_OF_AMPM, 11L); + builder.parseDefaulting(ChronoField.AMPM_OF_DAY, 1L); + } else if (parserAsString.contains(ChronoField.HOUR_OF_AMPM.toString())) { + builder.parseDefaulting(ChronoField.HOUR_OF_AMPM, 11L); + builder.parseDefaulting(ChronoField.AMPM_OF_DAY, 1L); + } else { + builder.parseDefaulting(ChronoField.HOUR_OF_DAY, 23L); + } + builder.parseDefaulting(ChronoField.MINUTE_OF_HOUR, 59L); + builder.parseDefaulting(ChronoField.SECOND_OF_MINUTE, 59L); + builder.parseDefaulting(ChronoField.NANO_OF_SECOND, 999_999_999L); + }; private final String format; private final DateTimeFormatter printer; @@ -50,12 +84,7 @@ JavaDateFormatter getRoundupParser() { format, printer, // set up base fields which should be used for default parsing, when we round up for date math - builder -> builder.parseDefaulting(ChronoField.MONTH_OF_YEAR, 1L) - .parseDefaulting(ChronoField.DAY_OF_MONTH, 1L) - .parseDefaulting(ChronoField.HOUR_OF_DAY, 23L) - .parseDefaulting(ChronoField.MINUTE_OF_HOUR, 59L) - .parseDefaulting(ChronoField.SECOND_OF_MINUTE, 59L) - .parseDefaulting(ChronoField.NANO_OF_SECOND, 999_999_999L), + DEFAULT_ROUND_UP, parsers ); } @@ -64,7 +93,7 @@ JavaDateFormatter getRoundupParser() { JavaDateFormatter( String format, DateTimeFormatter printer, - Consumer roundupParserConsumer, + BiConsumer roundupParserConsumer, DateTimeFormatter... 
parsers ) { if (printer == null) { @@ -105,7 +134,7 @@ private static DateTimeFormatter[] parsersArray(DateTimeFormatter printer, DateT */ private static RoundUpFormatter createRoundUpParser( String format, - Consumer roundupParserConsumer, + BiConsumer roundupParserConsumer, Locale locale, DateTimeFormatter[] parsers ) { @@ -113,7 +142,7 @@ private static RoundUpFormatter createRoundUpParser( return new RoundUpFormatter(format, mapParsers(parser -> { DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder(); builder.append(parser); - roundupParserConsumer.accept(builder); + roundupParserConsumer.accept(builder, parser); return builder.toFormatter(locale); }, parsers)); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/CompositeRuntimeField.java b/server/src/main/java/org/elasticsearch/index/mapper/CompositeRuntimeField.java index a8e9b487160de..ea8fbb0dcd60b 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/CompositeRuntimeField.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/CompositeRuntimeField.java @@ -17,6 +17,7 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; @@ -85,7 +86,12 @@ protected RuntimeField createRuntimeField(MappingParserContext parserContext) { name, lookup -> factory.newFactory(name, script.get().getParams(), lookup) ); - Map runtimeFields = RuntimeField.parseRuntimeFields(fields.getValue(), parserContext, builder, false); + Map runtimeFields = RuntimeField.parseRuntimeFields( + new HashMap<>(fields.getValue()), + parserContext, + builder, + false + ); return new CompositeRuntimeField(name, getParameters(), runtimeFields.values()); } }); @@ -118,11 +124,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws for (FieldMapper.Parameter parameter : parameters) { parameter.toXContent(builder, includeDefaults); } - builder.startObject("fields"); - for (RuntimeField subfield : subfields) { - subfield.toXContent(builder, params); - } - builder.endObject(); builder.endObject(); return builder; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java index 68142091cde22..6533e48d893a4 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java @@ -170,7 +170,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } @Override - public ObjectMapper merge(Mapper mergeWith, MapperService.MergeReason reason, MapperBuilderContext mapperBuilderContext) { + public ObjectMapper merge(Mapper mergeWith, MapperService.MergeReason reason, MapperBuilderContext parentBuilderContext) { if ((mergeWith instanceof NestedObjectMapper) == false) { throw new IllegalArgumentException("can't merge a non nested mapping [" + mergeWith.name() + "] with a nested mapping"); } @@ -191,7 +191,7 @@ public ObjectMapper merge(Mapper mergeWith, MapperService.MergeReason reason, Ma throw new MapperException("the [include_in_root] parameter can't be updated on a nested object mapping"); } } - toMerge.doMerge(mergeWithObject, reason, mapperBuilderContext); + toMerge.doMerge(mergeWithObject, reason, parentBuilderContext); return toMerge; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java 
b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java index 9b3786015669e..e76b29e2bbd70 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java @@ -150,6 +150,11 @@ protected final Map buildMappers(boolean root, MapperBuilderCont assert mapper instanceof ObjectMapper == false || subobjects.value() : "unexpected object while subobjects are disabled"; Mapper existing = mappers.get(mapper.simpleName()); if (existing != null) { + // The same mappings or document may hold the same field twice, either because duplicated JSON keys are allowed or + // the same field is provided using the object notation as well as the dot notation at the same time. + // This can also happen due to multiple index templates being merged into a single mappings definition using + // XContentHelper#mergeDefaults, again in case some index templates contained mappings for the same field using a + // mix of object notation and dot notation. mapper = existing.merge(mapper, mapperBuilderContext); } mappers.put(mapper.simpleName(), mapper); @@ -426,7 +431,11 @@ public void validate(MappingLookup mappers) { } } - public ObjectMapper merge(Mapper mergeWith, MergeReason reason, MapperBuilderContext mapperBuilderContext) { + protected MapperBuilderContext createChildContext(MapperBuilderContext mapperBuilderContext, String name) { + return mapperBuilderContext.createChildContext(name); + } + + public ObjectMapper merge(Mapper mergeWith, MergeReason reason, MapperBuilderContext parentBuilderContext) { if ((mergeWith instanceof ObjectMapper) == false) { throw new IllegalArgumentException("can't merge a non object mapping [" + mergeWith.name() + "] with an object mapping"); } @@ -436,12 +445,11 @@ public ObjectMapper merge(Mapper mergeWith, MergeReason reason, MapperBuilderCon } ObjectMapper mergeWithObject = (ObjectMapper) mergeWith; ObjectMapper merged = clone(); - merged.doMerge(mergeWithObject, reason, mapperBuilderContext); + merged.doMerge(mergeWithObject, reason, parentBuilderContext); return merged; } - protected void doMerge(final ObjectMapper mergeWith, MergeReason reason, MapperBuilderContext mapperBuilderContext) { - + protected void doMerge(final ObjectMapper mergeWith, MergeReason reason, MapperBuilderContext parentBuilderContext) { if (mergeWith.dynamic != null) { this.dynamic = mergeWith.dynamic; } @@ -462,6 +470,7 @@ protected void doMerge(final ObjectMapper mergeWith, MergeReason reason, MapperB } } + MapperBuilderContext objectBuilderContext = createChildContext(parentBuilderContext, simpleName()); Map mergedMappers = null; for (Mapper mergeWithMapper : mergeWith) { Mapper mergeIntoMapper = (mergedMappers == null ? 
mappers : mergedMappers).get(mergeWithMapper.simpleName()); @@ -470,8 +479,7 @@ protected void doMerge(final ObjectMapper mergeWith, MergeReason reason, MapperB if (mergeIntoMapper == null) { merged = mergeWithMapper; } else if (mergeIntoMapper instanceof ObjectMapper objectMapper) { - MapperBuilderContext childContext = mapperBuilderContext.createChildContext(objectMapper.simpleName()); - merged = objectMapper.merge(mergeWithMapper, reason, childContext); + merged = objectMapper.merge(mergeWithMapper, reason, objectBuilderContext); } else { assert mergeIntoMapper instanceof FieldMapper || mergeIntoMapper instanceof FieldAliasMapper; if (mergeWithMapper instanceof ObjectMapper) { @@ -485,7 +493,7 @@ protected void doMerge(final ObjectMapper mergeWith, MergeReason reason, MapperB if (reason == MergeReason.INDEX_TEMPLATE) { merged = mergeWithMapper; } else { - merged = mergeIntoMapper.merge(mergeWithMapper, mapperBuilderContext); + merged = mergeIntoMapper.merge(mergeWithMapper, objectBuilderContext); } } if (mergedMappers == null) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java index f9b4cdcecbc94..288e2a1b60aa9 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java @@ -322,13 +322,19 @@ RuntimeField getRuntimeField(String name) { } @Override - public RootObjectMapper merge(Mapper mergeWith, MergeReason reason, MapperBuilderContext mapperBuilderContext) { - return (RootObjectMapper) super.merge(mergeWith, reason, mapperBuilderContext); + protected MapperBuilderContext createChildContext(MapperBuilderContext mapperBuilderContext, String name) { + assert mapperBuilderContext == MapperBuilderContext.ROOT; + return mapperBuilderContext; } @Override - protected void doMerge(ObjectMapper mergeWith, MergeReason reason, MapperBuilderContext mapperBuilderContext) { - super.doMerge(mergeWith, reason, mapperBuilderContext); + public RootObjectMapper merge(Mapper mergeWith, MergeReason reason, MapperBuilderContext parentBuilderContext) { + return (RootObjectMapper) super.merge(mergeWith, reason, parentBuilderContext); + } + + @Override + protected void doMerge(ObjectMapper mergeWith, MergeReason reason, MapperBuilderContext parentBuilderContext) { + super.doMerge(mergeWith, reason, parentBuilderContext); RootObjectMapper mergeWithObject = (RootObjectMapper) mergeWith; if (mergeWithObject.numericDetection.explicit()) { this.numericDetection = mergeWithObject.numericDetection; diff --git a/server/src/main/java/org/elasticsearch/index/query/IntervalBuilder.java b/server/src/main/java/org/elasticsearch/index/query/IntervalBuilder.java index 5f158a1a733e0..6f75702032c75 100644 --- a/server/src/main/java/org/elasticsearch/index/query/IntervalBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/IntervalBuilder.java @@ -225,6 +225,8 @@ protected List analyzeGraph(TokenStream source) throws IOExcept @Override public IntervalIterator intervals(String field, LeafReaderContext ctx) { return new IntervalIterator() { + boolean exhausted = false; + @Override public int start() { return NO_MORE_INTERVALS; @@ -252,16 +254,18 @@ public float matchCost() { @Override public int docID() { - return NO_MORE_DOCS; + return exhausted ? 
NO_MORE_DOCS : -1; } @Override public int nextDoc() { + exhausted = true; return NO_MORE_DOCS; } @Override public int advance(int target) { + exhausted = true; return NO_MORE_DOCS; } diff --git a/server/src/main/java/org/elasticsearch/ingest/CompoundProcessor.java b/server/src/main/java/org/elasticsearch/ingest/CompoundProcessor.java index a70f32b77e679..3d5637c36f012 100644 --- a/server/src/main/java/org/elasticsearch/ingest/CompoundProcessor.java +++ b/server/src/main/java/org/elasticsearch/ingest/CompoundProcessor.java @@ -327,7 +327,7 @@ static IngestProcessorException newCompoundProcessorException(Exception e, Proce } if (document != null) { List pipelineStack = document.getPipelineStack(); - if (pipelineStack.size() > 1) { + if (pipelineStack.isEmpty() == false) { exception.addHeader("pipeline_origin", pipelineStack); } } diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestDocMetadata.java b/server/src/main/java/org/elasticsearch/ingest/IngestDocMetadata.java index 0897f1a3175e4..ba2283c57cf4b 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestDocMetadata.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestDocMetadata.java @@ -84,7 +84,7 @@ protected static Map metadataMap(String index, String id, long v } @Override - public ZonedDateTime getTimestamp() { + public ZonedDateTime getNow() { return timestamp; } } diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java b/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java index 715ba748e6049..3a3c2349aea25 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java @@ -59,7 +59,7 @@ public final class IngestDocument { public IngestDocument(String index, String id, long version, String routing, VersionType versionType, Map source) { this.sourceAndMetadata = new IngestCtxMap(index, id, version, routing, versionType, ZonedDateTime.now(ZoneOffset.UTC), source); this.ingestMetadata = new HashMap<>(); - this.ingestMetadata.put(TIMESTAMP, sourceAndMetadata.getMetadata().getTimestamp()); + this.ingestMetadata.put(TIMESTAMP, sourceAndMetadata.getMetadata().getNow()); } /** diff --git a/server/src/main/java/org/elasticsearch/lucene/queries/BlendedTermQuery.java b/server/src/main/java/org/elasticsearch/lucene/queries/BlendedTermQuery.java index a57feef2f9b23..0c08f58909a01 100644 --- a/server/src/main/java/org/elasticsearch/lucene/queries/BlendedTermQuery.java +++ b/server/src/main/java/org/elasticsearch/lucene/queries/BlendedTermQuery.java @@ -148,7 +148,10 @@ protected int compare(int i, int j) { if (prev > current) { actualDf++; } - contexts[i] = ctx = adjustDF(reader.getContext(), ctx, Math.min(maxDoc, actualDf)); + + int docCount = reader.getDocCount(terms[i].field()); + int newDocFreq = Math.min(actualDf, docCount); + contexts[i] = ctx = adjustDF(reader.getContext(), ctx, newDocFreq); prev = current; sumTTF += ctx.totalTermFreq(); } diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 1fbd21a4aea66..d51e5a89b246a 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -817,6 +817,13 @@ protected Node( transportService, indicesService ); + + FileSettingsService fileSettingsService = new FileSettingsService( + clusterService, + actionModule.getReservedClusterStateService(), + environment + ); + RestoreService restoreService = new 
RestoreService( clusterService, repositoryService, @@ -826,7 +833,8 @@ protected Node( indexMetadataVerifier, shardLimitValidator, systemIndices, - indicesService + indicesService, + fileSettingsService ); final DiskThresholdMonitor diskThresholdMonitor = new DiskThresholdMonitor( settings, @@ -946,12 +954,6 @@ protected Node( ? new HealthMetadataService(clusterService, settings) : null; - FileSettingsService fileSettingsService = new FileSettingsService( - clusterService, - actionModule.getReservedClusterStateService(), - environment - ); - modules.add(b -> { b.bind(Node.class).toInstance(this); b.bind(NodeService.class).toInstance(nodeService); diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 3b6f61aad09ee..6e667f2e5cee4 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -2026,6 +2026,7 @@ private static String previousWriterMessage(@Nullable Tuple previo private void markRepoCorrupted(long corruptedGeneration, Exception originalException, ActionListener listener) { assert corruptedGeneration != RepositoryData.UNKNOWN_REPO_GEN; assert bestEffortConsistency == false; + logger.warn(() -> "Marking repository [" + metadata.name() + "] as corrupted", originalException); submitUnbatchedTask( "mark repository corrupted [" + metadata.name() + "][" + corruptedGeneration + "]", new ClusterStateUpdateTask() { diff --git a/server/src/main/java/org/elasticsearch/reservedstate/ReservedClusterStateHandler.java b/server/src/main/java/org/elasticsearch/reservedstate/ReservedClusterStateHandler.java index 47c412e0ea2f9..aa3cd6f4cd869 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/ReservedClusterStateHandler.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/ReservedClusterStateHandler.java @@ -28,8 +28,6 @@ *

*/ public interface ReservedClusterStateHandler { - String CONTENT = "content"; - /** * Unique identifier for the handler. * diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java b/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java index a8141e8f711fa..c62d2b8658eae 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java @@ -13,6 +13,8 @@ import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.ReservedStateMetadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.env.Environment; @@ -28,6 +30,8 @@ import java.nio.file.WatchKey; import java.nio.file.WatchService; import java.nio.file.attribute.BasicFileAttributes; +import java.nio.file.attribute.FileTime; +import java.time.Instant; import java.util.concurrent.CountDownLatch; import java.util.function.Consumer; @@ -48,7 +52,7 @@ public class FileSettingsService extends AbstractLifecycleComponent implements C private static final Logger logger = LogManager.getLogger(FileSettingsService.class); private static final String SETTINGS_FILE_NAME = "settings.json"; - static final String NAMESPACE = "file_settings"; + public static final String NAMESPACE = "file_settings"; private final ClusterService clusterService; private final ReservedClusterStateService stateService; @@ -56,9 +60,11 @@ public class FileSettingsService extends AbstractLifecycleComponent implements C private WatchService watchService; // null; private CountDownLatch watcherThreadLatch; + private volatile CountDownLatch processingLatch; private volatile FileUpdateState fileUpdateState = null; private volatile WatchKey settingsDirWatchKey = null; + private volatile WatchKey configDirWatchKey = null; private volatile boolean active = false; private volatile boolean initialState = true; @@ -134,15 +140,67 @@ public void clusterChanged(ClusterChangedEvent event) { } private void startIfMaster(ClusterState clusterState) { - setWatching(currentNodeMaster(clusterState), initialState); + if (currentNodeMaster(clusterState)) { + startWatcher(clusterState, initialState); + } else { + stopWatcher(); + } initialState = false; } - private void setWatching(boolean watching, boolean initialState) { - if (watching) { - startWatcher(initialState); - } else { - stopWatcher(); + /** + * Used by snapshot restore service {@link org.elasticsearch.snapshots.RestoreService} to prepare the reserved + * state of the snapshot for the current cluster. + *

+ * If the current cluster where we are restoring the snapshot into has any operator file based settings, we'll + * reset the reserved state version to 0. + *

+ * If there's no file based settings file in this cluster, we'll remove all state reservations for + * file based settings from the cluster state. + * @param clusterState the cluster state before snapshot restore + * @param mdBuilder the current metadata builder for the new cluster state + */ + public void handleSnapshotRestore(ClusterState clusterState, Metadata.Builder mdBuilder) { + assert currentNodeMaster(clusterState); + + ReservedStateMetadata fileSettingsMetadata = clusterState.metadata().reservedStateMetadata().get(NAMESPACE); + + // When we restore from a snapshot we remove the reserved cluster state for file settings, + // since we don't know the current operator configuration, e.g. file settings could be disabled + // on the target cluster. If file settings exist and the cluster state has lost it's reserved + // state for the "file_settings" namespace, we touch our file settings file to cause it to re-process the file. + if (watching() && Files.exists(operatorSettingsFile())) { + if (fileSettingsMetadata != null) { + ReservedStateMetadata withResetVersion = new ReservedStateMetadata.Builder(fileSettingsMetadata).version(0L).build(); + mdBuilder.put(withResetVersion); + } + } else if (fileSettingsMetadata != null) { + mdBuilder.removeReservedState(fileSettingsMetadata); + } + } + + /** + * 'Touches' the settings file so the file watcher will re-processes it. + *
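
To recap `handleSnapshotRestore` as added above: if this node is watching an operator settings file, the restored reservation for the `file_settings` namespace is kept but its version is reset to 0 so the local file gets re-applied; if there is no file, the reservation is removed outright. A compact sketch of that decision, using hypothetical stand-ins rather than the real `ReservedStateMetadata`/`Metadata.Builder` types:

```java
import java.util.Optional;

// Hypothetical stand-in for ReservedStateMetadata, just to show the branching.
record ReservedState(String namespace, long version) {}

public class SnapshotRestoreSketch {
    static Optional<ReservedState> afterRestore(Optional<ReservedState> restored, boolean settingsFileExists) {
        if (settingsFileExists) {
            // Keep the reservation but reset its version to 0; the watcher will later re-apply
            // the local file, which always carries a version greater than 0.
            return restored.map(r -> new ReservedState(r.namespace(), 0L));
        }
        // No operator settings file on this cluster: drop the reservation completely.
        return Optional.empty();
    }

    public static void main(String[] args) {
        Optional<ReservedState> fromSnapshot = Optional.of(new ReservedState("file_settings", 42L));
        System.out.println(afterRestore(fromSnapshot, true));  // version reset to 0
        System.out.println(afterRestore(fromSnapshot, false)); // Optional.empty
    }
}
```

The version-0 reset is what `refreshExistingFileStateIfNeeded` (below in this hunk) looks for when it touches the settings file's modification time.
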

+ * The file processing is asynchronous, the cluster state or the file must be already updated such that + * the version information in the file is newer than what's already saved as processed in the + * cluster state. + * + * For snapshot restores we first must restore the snapshot and then force a refresh, since the cluster state + * metadata version must be reset to 0 and saved in the cluster state. + */ + private void refreshExistingFileStateIfNeeded(ClusterState clusterState) { + if (watching()) { + ReservedStateMetadata fileSettingsMetadata = clusterState.metadata().reservedStateMetadata().get(NAMESPACE); + // We check if the version was reset to 0, and force an update if a file exists. This can happen in situations + // like snapshot restores. + if (fileSettingsMetadata != null && fileSettingsMetadata.version() == 0L && Files.exists(operatorSettingsFile())) { + try { + Files.setLastModifiedTime(operatorSettingsFile(), FileTime.from(Instant.now())); + } catch (IOException e) { + logger.warn("encountered I/O error trying to update file settings timestamp", e); + } + } } } @@ -151,9 +209,21 @@ boolean watching() { return this.watchService != null; } - synchronized void startWatcher(boolean onStartup) { + private void cleanupWatchKeys() { + if (settingsDirWatchKey != null) { + settingsDirWatchKey.cancel(); + settingsDirWatchKey = null; + } + if (configDirWatchKey != null) { + configDirWatchKey.cancel(); + configDirWatchKey = null; + } + } + + synchronized void startWatcher(ClusterState clusterState, boolean onStartup) { if (watching() || active == false) { - // already watching or inactive, nothing to do + refreshExistingFileStateIfNeeded(clusterState); + return; } @@ -189,10 +259,11 @@ synchronized void startWatcher(boolean onStartup) { // We watch the config directory always, even if initially we had an operator directory // it can be deleted and created later. The config directory never goes away, we only // register it once for watching. - enableSettingsWatcher(null, operatorSettingsDir().getParent()); + configDirWatchKey = enableSettingsWatcher(configDirWatchKey, operatorSettingsDir().getParent()); } catch (Exception e) { if (watchService != null) { try { + cleanupWatchKeys(); this.watchService.close(); } catch (Exception ignore) {} finally { this.watchService = null; @@ -242,7 +313,15 @@ synchronized void startWatcher(boolean onStartup) { settingsDirWatchKey = enableSettingsWatcher(settingsDirWatchKey, settingsDir); if (watchedFileChanged(path)) { - processFileSettings(path, (e) -> logger.error("Error processing operator settings json file", e)).await(); + processingLatch = processFileSettings( + path, + (e) -> logger.error("Error processing operator settings json file", e) + ); + // After we get and set the processing latch, we need to check if stop wasn't + // invoked in the meantime. Stop will invalidate all watch keys. 
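
One detail worth calling out in the watcher loop: the latch returned by `processFileSettings` is now stored in the `processingLatch` field before the thread awaits it, and `stopWatcher()` (further down in this hunk) counts it down after closing the watch service, so shutdown can no longer leave the watcher thread blocked. A simplified, stand-alone sketch of that hand-off, with hypothetical names and a timeout added purely for safety:

```java
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class LatchHandOffSketch {
    private volatile CountDownLatch processingLatch;
    private volatile boolean stopped;

    // Called on the watcher thread when the settings file changes.
    void onFileChanged() throws InterruptedException {
        CountDownLatch latch = new CountDownLatch(1);
        processingLatch = latch;              // publish before awaiting, so stop() can see it
        if (stopped == false) {               // mirrors the configDirWatchKey != null check above
            latch.await(5, TimeUnit.SECONDS); // normally released when the settings update completes
        }
    }

    // Called when the node stops being master or shuts down.
    void stop() {
        stopped = true;
        CountDownLatch latch = processingLatch;
        if (latch != null) {
            latch.countDown();                // unblock a watcher thread that is mid-await
        }
    }

    public static void main(String[] args) throws Exception {
        LatchHandOffSketch sketch = new LatchHandOffSketch();
        Thread watcher = new Thread(() -> {
            try {
                sketch.onFileChanged();
            } catch (InterruptedException ignored) {}
        });
        watcher.start();
        Thread.sleep(100);
        sketch.stop(); // without the countDown the watcher would sit in await() until the timeout
        watcher.join();
        System.out.println("watcher exited cleanly");
    }
}
```
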
+ if (configDirWatchKey != null) { + processingLatch.await(); + } } } catch (IOException e) { logger.warn("encountered I/O error while watching file settings", e); @@ -266,12 +345,12 @@ synchronized void stopWatcher() { logger.debug("stopping watcher ..."); if (watching()) { try { - if (settingsDirWatchKey != null) { - settingsDirWatchKey.cancel(); - settingsDirWatchKey = null; - } + cleanupWatchKeys(); fileUpdateState = null; watchService.close(); + if (processingLatch != null) { + processingLatch.countDown(); + } if (watcherThreadLatch != null) { watcherThreadLatch.await(); } diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java index 96b5d5597f4cd..0730a50310740 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.ClusterState; @@ -103,7 +102,7 @@ public void process(String namespace, XContentParser parser, Consumer stateChunk = stateChunkParser.apply(parser, null); } catch (Exception e) { ErrorState errorState = new ErrorState(namespace, -1L, e, ReservedStateErrorMetadata.ErrorKind.PARSING); - saveErrorState(errorState); + saveErrorState(clusterService.state(), errorState); logger.debug("error processing state change request for [{}] with the following errors [{}]", namespace, errorState); errorListener.accept( @@ -138,7 +137,7 @@ public void process(String namespace, ReservedStateChunk reservedStateChunk, Con ReservedStateErrorMetadata.ErrorKind.PARSING ); - saveErrorState(errorState); + saveErrorState(clusterService.state(), errorState); logger.debug("error processing state change request for [{}] with the following errors [{}]", namespace, errorState); errorListener.accept( @@ -149,9 +148,6 @@ public void process(String namespace, ReservedStateChunk reservedStateChunk, Con ClusterState state = clusterService.state(); ReservedStateMetadata existingMetadata = state.metadata().reservedStateMetadata().get(namespace); - if (checkMetadataVersion(namespace, existingMetadata, reservedStateVersion) == false) { - return; - } clusterService.submitStateUpdateTask( "reserved cluster state [" + namespace + "]", @@ -160,7 +156,7 @@ public void process(String namespace, ReservedStateChunk reservedStateChunk, Con reservedStateChunk, handlers, orderedHandlers, - (errorState) -> saveErrorState(errorState), + (clusterState, errorState) -> saveErrorState(clusterState, errorState), new ActionListener<>() { @Override public void onResponse(ActionResponse.Empty empty) { @@ -174,6 +170,8 @@ public void onFailure(Exception e) { if (isNewError(existingMetadata, reservedStateVersion.version())) { logger.debug("Failed to apply reserved cluster state", e); errorListener.accept(e); + } else { + errorListener.accept(null); } } } @@ -183,39 +181,6 @@ public void onFailure(Exception e) { ); } - // package private for testing - static boolean checkMetadataVersion( - String namespace, - ReservedStateMetadata existingMetadata, - ReservedStateVersion reservedStateVersion - ) { - if (Version.CURRENT.before(reservedStateVersion.minCompatibleVersion())) { - 
logger.warn( - () -> format( - "Reserved cluster state version [%s] for namespace [%s] is not compatible with this Elasticsearch node", - reservedStateVersion.minCompatibleVersion(), - namespace - ) - ); - return false; - } - - if (existingMetadata != null && existingMetadata.version() >= reservedStateVersion.version()) { - logger.warn( - () -> format( - "Not updating reserved cluster state for namespace [%s], because version [%s] is less or equal" - + " to the current metadata version [%s]", - namespace, - reservedStateVersion.version(), - existingMetadata.version() - ) - ); - return false; - } - - return true; - } - // package private for testing static boolean isNewError(ReservedStateMetadata existingMetadata, Long newStateVersion) { return (existingMetadata == null @@ -223,8 +188,7 @@ static boolean isNewError(ReservedStateMetadata existingMetadata, Long newStateV || existingMetadata.errorMetadata().version() < newStateVersion); } - private void saveErrorState(ErrorState errorState) { - ClusterState clusterState = clusterService.state(); + private void saveErrorState(ClusterState clusterState, ErrorState errorState) { ReservedStateMetadata existingMetadata = clusterState.metadata().reservedStateMetadata().get(errorState.namespace()); if (isNewError(existingMetadata, errorState.version()) == false) { diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateErrorTaskExecutor.java b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateErrorTaskExecutor.java index 5a3d70668855b..ea37daf87ba66 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateErrorTaskExecutor.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateErrorTaskExecutor.java @@ -27,10 +27,9 @@ record ReservedStateErrorTaskExecutor() implements ClusterStateTaskExecutor> taskContexts) { for (final var taskContext : taskContexts) { - currentState = taskContext.getTask().execute(currentState); - taskContext.success( - () -> taskContext.getTask().listener().delegateFailure((l, s) -> l.onResponse(ActionResponse.Empty.INSTANCE)) - ); + final var task = taskContext.getTask(); + currentState = task.execute(currentState); + taskContext.success(() -> task.listener().onResponse(ActionResponse.Empty.INSTANCE)); } return currentState; } diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTask.java b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTask.java index 0631aee59cf6e..c46c555856d62 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTask.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTask.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.ClusterState; @@ -27,7 +28,7 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.function.Consumer; +import java.util.function.BiConsumer; import static org.elasticsearch.ExceptionsHelper.stackTrace; import static org.elasticsearch.core.Strings.format; @@ -46,7 +47,7 @@ public class ReservedStateUpdateTask implements ClusterStateTaskListener { private final ReservedStateChunk stateChunk; private final Map> handlers; private final Collection orderedHandlers; - private final 
Consumer errorReporter; + private final BiConsumer errorReporter; private final ActionListener listener; public ReservedStateUpdateTask( @@ -54,7 +55,7 @@ public ReservedStateUpdateTask( ReservedStateChunk stateChunk, Map> handlers, Collection orderedHandlers, - Consumer errorReporter, + BiConsumer errorReporter, ActionListener listener ) { this.namespace = namespace; @@ -79,6 +80,10 @@ protected ClusterState execute(final ClusterState currentState) { Map reservedState = stateChunk.state(); ReservedStateVersion reservedStateVersion = stateChunk.metadata(); + if (checkMetadataVersion(namespace, existingMetadata, reservedStateVersion) == false) { + return currentState; + } + var reservedMetadataBuilder = new ReservedStateMetadata.Builder(namespace).version(reservedStateVersion.version()); List errors = new ArrayList<>(); @@ -107,7 +112,7 @@ protected ClusterState execute(final ClusterState currentState) { ReservedStateErrorMetadata.ErrorKind.VALIDATION ); - errorReporter.accept(errorState); + errorReporter.accept(currentState, errorState); throw new IllegalStateException("Error processing state change request for " + namespace + ", errors: " + errorState); } @@ -128,4 +133,49 @@ private Set keysForHandler(ReservedStateMetadata reservedStateMetadata, return reservedStateMetadata.handlers().get(handlerName).keys(); } + + static boolean checkMetadataVersion( + String namespace, + ReservedStateMetadata existingMetadata, + ReservedStateVersion reservedStateVersion + ) { + if (Version.CURRENT.before(reservedStateVersion.minCompatibleVersion())) { + logger.warn( + () -> format( + "Reserved cluster state version [%s] for namespace [%s] is not compatible with this Elasticsearch node", + reservedStateVersion.minCompatibleVersion(), + namespace + ) + ); + return false; + } + + // Version 0 is special, snapshot restores will reset to 0. + if (reservedStateVersion.version() <= 0L) { + logger.warn( + () -> format( + "Not updating reserved cluster state for namespace [%s], because version [%s] is less or equal to 0", + namespace, + reservedStateVersion.version(), + existingMetadata.version() + ) + ); + return false; + } + + if (existingMetadata != null && existingMetadata.version() >= reservedStateVersion.version()) { + logger.warn( + () -> format( + "Not updating reserved cluster state for namespace [%s], because version [%s] is less or equal" + + " to the current metadata version [%s]", + namespace, + reservedStateVersion.version(), + existingMetadata.version() + ) + ); + return false; + } + + return true; + } } diff --git a/server/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java b/server/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java index b04be4b5fc570..d7e15afe31800 100644 --- a/server/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java +++ b/server/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java @@ -169,11 +169,8 @@ public final BytesStream bytesOutput() { return bytesOut; } - /** - * Releases the current output buffer for this channel. Must be called after the buffer derived from {@link #bytesOutput} is no longer - * needed. 
- */ - protected final void releaseOutputBuffer() { + @Override + public final void releaseOutputBuffer() { if (bytesOut != null) { try { bytesOut.close(); diff --git a/server/src/main/java/org/elasticsearch/rest/RestChannel.java b/server/src/main/java/org/elasticsearch/rest/RestChannel.java index eb4c631e32532..4e0cc8453a4e5 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestChannel.java +++ b/server/src/main/java/org/elasticsearch/rest/RestChannel.java @@ -31,6 +31,12 @@ XContentBuilder newBuilder(@Nullable XContentType xContentType, @Nullable XConte BytesStream bytesOutput(); + /** + * Releases the current output buffer for this channel. Must be called after the buffer derived from {@link #bytesOutput} is no longer + * needed. + */ + void releaseOutputBuffer(); + RestRequest request(); /** diff --git a/server/src/main/java/org/elasticsearch/rest/RestController.java b/server/src/main/java/org/elasticsearch/rest/RestController.java index 54be23a5386a8..f3196bb2d4be1 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestController.java +++ b/server/src/main/java/org/elasticsearch/rest/RestController.java @@ -714,6 +714,11 @@ public BytesStream bytesOutput() { return delegate.bytesOutput(); } + @Override + public void releaseOutputBuffer() { + delegate.releaseOutputBuffer(); + } + @Override public RestRequest request() { return delegate.request(); @@ -726,8 +731,16 @@ public boolean detailedErrorsEnabled() { @Override public void sendResponse(RestResponse response) { - close(); - delegate.sendResponse(response); + boolean success = false; + try { + close(); + delegate.sendResponse(response); + success = true; + } finally { + if (success == false) { + releaseOutputBuffer(); + } + } } private void close() { diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java index 8e0c1e403a7e8..86ac7088642d1 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java @@ -78,7 +78,7 @@ public RestResponse buildResponse(NodesReloadSecureSettingsResponse response, XC builder.field("cluster_name", response.getClusterName().value()); response.toXContent(builder, channel.request()); builder.endObject(); - nodesRequestBuilder.request().closePassword(); + nodesRequestBuilder.request().close(); return new RestResponse(RestStatus.OK, builder); } }); diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestKnnSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestKnnSearchAction.java index 30b25c8d4cfef..b697db7929ecf 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestKnnSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestKnnSearchAction.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestCancellableNodeClient; @@ -28,11 +29,16 @@ */ public class RestKnnSearchAction extends BaseRestHandler { + static final String DEPRECATION_MESSAGE = "The kNN search API has been replaced by the `knn` option in the search API."; + 
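
Stepping back to the `RestController` hunk above: `sendResponse` now wraps the delegate call so the output buffer is released only when sending fails, since on success ownership of the buffer passes to the response. A small generic sketch of that release-on-failure pattern (names are illustrative, not from this change):

```java
import java.io.ByteArrayOutputStream;
import java.io.IOException;

public class ReleaseOnFailureSketch {
    interface CheckedSend {
        void send(ByteArrayOutputStream buffer) throws IOException;
    }

    // On success the delegate takes ownership of the buffer, so it is only released here when the delegate throws.
    static void sendResponse(ByteArrayOutputStream buffer, CheckedSend delegate) throws IOException {
        boolean success = false;
        try {
            delegate.send(buffer);
            success = true;
        } finally {
            if (success == false) {
                buffer.close(); // placeholder for releaseOutputBuffer()
            }
        }
    }

    public static void main(String[] args) throws IOException {
        sendResponse(new ByteArrayOutputStream(), buffer -> {}); // success: nothing released here
        try {
            sendResponse(new ByteArrayOutputStream(), buffer -> { throw new IOException("boom"); });
        } catch (IOException e) {
            System.out.println("send failed, buffer was released: " + e.getMessage());
        }
    }
}
```
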
public RestKnnSearchAction() {} @Override public List routes() { - return List.of(new Route(GET, "{index}/_knn_search"), new Route(POST, "{index}/_knn_search")); + return List.of( + Route.builder(GET, "{index}/_knn_search").deprecated(DEPRECATION_MESSAGE, RestApiVersion.V_8).build(), + Route.builder(POST, "{index}/_knn_search").deprecated(DEPRECATION_MESSAGE, RestApiVersion.V_8).build() + ); } @Override diff --git a/server/src/main/java/org/elasticsearch/script/Metadata.java b/server/src/main/java/org/elasticsearch/script/Metadata.java index 88a93c2d6ea92..3da49792b2924 100644 --- a/server/src/main/java/org/elasticsearch/script/Metadata.java +++ b/server/src/main/java/org/elasticsearch/script/Metadata.java @@ -44,7 +44,7 @@ public class Metadata { protected static final String VERSION_TYPE = "_version_type"; protected static final String VERSION = "_version"; protected static final String TYPE = "_type"; // type is deprecated, so it's supported in the map but not available as a getter - protected static final String TIMESTAMP = "_now"; + protected static final String NOW = "_now"; protected static final String OP = "op"; protected static final String IF_SEQ_NO = "_if_seq_no"; protected static final String IF_PRIMARY_TERM = "_if_primary_term"; @@ -122,8 +122,8 @@ public void setVersion(long version) { put(VERSION, version); } - public ZonedDateTime getTimestamp() { - return ZonedDateTime.ofInstant(Instant.ofEpochMilli(getNumber(TIMESTAMP).longValue()), ZoneOffset.UTC); + public ZonedDateTime getNow() { + return ZonedDateTime.ofInstant(Instant.ofEpochMilli(getNumber(NOW).longValue()), ZoneOffset.UTC); } public String getOp() { diff --git a/server/src/main/java/org/elasticsearch/script/UpdateCtxMap.java b/server/src/main/java/org/elasticsearch/script/UpdateCtxMap.java index 738eca6a427f4..42d57cac1263d 100644 --- a/server/src/main/java/org/elasticsearch/script/UpdateCtxMap.java +++ b/server/src/main/java/org/elasticsearch/script/UpdateCtxMap.java @@ -22,10 +22,10 @@ public UpdateCtxMap( String routing, String type, String op, - long timestamp, + long now, Map source ) { - super(source, new UpdateMetadata(index, id, version, routing, type, op, timestamp)); + super(source, new UpdateMetadata(index, id, version, routing, type, op, now)); } protected UpdateCtxMap(Map source, Metadata metadata) { diff --git a/server/src/main/java/org/elasticsearch/script/UpdateMetadata.java b/server/src/main/java/org/elasticsearch/script/UpdateMetadata.java index 678c8a3f7faca..2e88ddb5a3b1a 100644 --- a/server/src/main/java/org/elasticsearch/script/UpdateMetadata.java +++ b/server/src/main/java/org/elasticsearch/script/UpdateMetadata.java @@ -47,14 +47,14 @@ public class UpdateMetadata extends Metadata { SET_ONCE_STRING, OP, new FieldProperty<>(String.class, true, true, null), - TIMESTAMP, + NOW, SET_ONCE_LONG ); protected final Set validOps; - public UpdateMetadata(String index, String id, long version, String routing, String type, String op, long timestamp) { - this(metadataMap(index, id, version, routing, type, op, timestamp), Set.of("noop", "index", "delete"), PROPERTIES); + public UpdateMetadata(String index, String id, long version, String routing, String type, String op, long now) { + this(metadataMap(index, id, version, routing, type, op, now), Set.of("noop", "index", "delete"), PROPERTIES); } protected UpdateMetadata(Map metadata, Set validOps, Map> properties) { @@ -69,7 +69,7 @@ protected static Map metadataMap( String routing, String type, String op, - long timestamp + long now ) { Map metadata = 
Maps.newHashMapWithExpectedSize(PROPERTIES.size()); metadata.put(INDEX, index); @@ -78,7 +78,7 @@ protected static Map metadataMap( metadata.put(ROUTING, routing); metadata.put(TYPE, type); metadata.put(OP, op); - metadata.put(TIMESTAMP, timestamp); + metadata.put(NOW, now); return metadata; } diff --git a/server/src/main/java/org/elasticsearch/script/UpsertCtxMap.java b/server/src/main/java/org/elasticsearch/script/UpsertCtxMap.java index 5a871502bf065..17a7e2d2b5b84 100644 --- a/server/src/main/java/org/elasticsearch/script/UpsertCtxMap.java +++ b/server/src/main/java/org/elasticsearch/script/UpsertCtxMap.java @@ -14,7 +14,7 @@ * Metadata for insert via upsert in the Update context */ public class UpsertCtxMap extends UpdateCtxMap { - public UpsertCtxMap(String index, String id, String op, long timestamp, Map source) { - super(source, new UpsertMetadata(index, id, op, timestamp)); + public UpsertCtxMap(String index, String id, String op, long now, Map source) { + super(source, new UpsertMetadata(index, id, op, now)); } } diff --git a/server/src/main/java/org/elasticsearch/script/UpsertMetadata.java b/server/src/main/java/org/elasticsearch/script/UpsertMetadata.java index b82b89466b5be..7944bb4d1e784 100644 --- a/server/src/main/java/org/elasticsearch/script/UpsertMetadata.java +++ b/server/src/main/java/org/elasticsearch/script/UpsertMetadata.java @@ -21,20 +21,20 @@ class UpsertMetadata extends UpdateMetadata { SET_ONCE_STRING, OP, new FieldProperty<>(String.class, true, true, null), - TIMESTAMP, + NOW, SET_ONCE_LONG ); - UpsertMetadata(String index, String id, String op, long timestamp) { - super(metadataMap(index, id, op, timestamp), Set.of("noop", "create"), PROPERTIES); + UpsertMetadata(String index, String id, String op, long now) { + super(metadataMap(index, id, op, now), Set.of("noop", "create"), PROPERTIES); } - protected static Map metadataMap(String index, String id, String op, long timestamp) { + protected static Map metadataMap(String index, String id, String op, long now) { Map metadata = Maps.newHashMapWithExpectedSize(PROPERTIES.size()); metadata.put(INDEX, index); metadata.put(ID, id); metadata.put(OP, op); - metadata.put(TIMESTAMP, timestamp); + metadata.put(NOW, now); return metadata; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeBuilder.java index ccaf8f8f1210d..87f726b47579e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeBuilder.java @@ -83,7 +83,10 @@ protected Range[] processRanges(Function rangeProcessor) { return ranges; } - private static void sortRanges(final Range[] ranges) { + /** + * Sort the provided ranges in place. 
+ */ + static void sortRanges(final Range[] ranges) { new InPlaceMergeSorter() { @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java index e3fdec316efd6..4472c3f0628c6 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java @@ -456,7 +456,7 @@ protected ValuesSourceAggregatorFactory innerBuild( if (ranges.length == 0) { throw new IllegalArgumentException("No [ranges] specified for the [" + this.getName() + "] aggregation"); } - + AbstractRangeBuilder.sortRanges(ranges); return new GeoDistanceRangeAggregatorFactory( name, config, diff --git a/server/src/main/java/org/elasticsearch/search/internal/ExitableDirectoryReader.java b/server/src/main/java/org/elasticsearch/search/internal/ExitableDirectoryReader.java index a72bce0966050..da0319818b487 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ExitableDirectoryReader.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ExitableDirectoryReader.java @@ -18,6 +18,7 @@ import org.apache.lucene.index.QueryTimeout; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.suggest.document.CompletionTerms; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.automaton.CompiledAutomaton; @@ -346,12 +347,24 @@ public void visit(int docID) throws IOException { in.visit(docID); } + @Override + public void visit(DocIdSetIterator iterator) throws IOException { + checkAndThrowWithSampling(); + in.visit(iterator); + } + @Override public void visit(int docID, byte[] packedValue) throws IOException { checkAndThrowWithSampling(); in.visit(docID, packedValue); } + @Override + public void visit(DocIdSetIterator iterator, byte[] packedValue) throws IOException { + checkAndThrowWithSampling(); + in.visit(iterator, packedValue); + } + @Override public PointValues.Relation compare(byte[] minPackedValue, byte[] maxPackedValue) { queryCancellation.checkCancelled(); diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index 512fe1766133f..bb0587b756ca4 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -77,6 +77,7 @@ import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.reservedstate.service.FileSettingsService; import java.io.IOException; import java.util.ArrayList; @@ -183,6 +184,8 @@ public class RestoreService implements ClusterStateApplier { private final IndicesService indicesService; + private final FileSettingsService fileSettingsService; + private volatile boolean refreshRepositoryUuidOnRestore; public RestoreService( @@ -194,7 +197,8 @@ public RestoreService( IndexMetadataVerifier indexMetadataVerifier, ShardLimitValidator shardLimitValidator, SystemIndices systemIndices, - IndicesService indicesService + IndicesService indicesService, + FileSettingsService fileSettingsService ) { this.clusterService = 
clusterService; this.repositoriesService = repositoriesService; @@ -209,6 +213,7 @@ public RestoreService( this.shardLimitValidator = shardLimitValidator; this.systemIndices = systemIndices; this.indicesService = indicesService; + this.fileSettingsService = fileSettingsService; this.refreshRepositoryUuidOnRestore = REFRESH_REPO_UUID_ON_RESTORE_SETTING.get(clusterService.getSettings()); clusterService.getClusterSettings() .addSettingsUpdateConsumer(REFRESH_REPO_UUID_ON_RESTORE_SETTING, this::setRefreshRepositoryUuidOnRestore); @@ -1389,6 +1394,7 @@ && isSystemIndex(snapshotIndexMetadata) == false) { // Restore global state if needed if (request.includeGlobalState()) { applyGlobalStateRestore(currentState, mdBuilder); + fileSettingsService.handleSnapshotRestore(currentState, mdBuilder); } if (completed(shards)) { diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequestTests.java index 5023fb131a1aa..5529c52044624 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequestTests.java @@ -25,8 +25,10 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; +import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; public class AddVotingConfigExclusionsRequestTests extends ESTestCase { @@ -235,6 +237,40 @@ public void testResolveByNodeNames() { ); } + public void testResolveAmbiguousName() { + final DiscoveryNode node1 = new DiscoveryNode( + "ambiguous-name", + "nodeId1", + buildNewFakeTransportAddress(), + emptyMap(), + Set.of(DiscoveryNodeRole.MASTER_ROLE), + Version.CURRENT + ); + + final DiscoveryNode node2 = new DiscoveryNode( + "ambiguous-name", + "nodeId2", + buildNewFakeTransportAddress(), + emptyMap(), + Set.of(DiscoveryNodeRole.MASTER_ROLE), + Version.CURRENT + ); + + final ClusterState clusterState = ClusterState.builder(new ClusterName("cluster")) + .nodes(new Builder().add(node1).add(node2).localNodeId(node1.getId())) + .build(); + + final var request = new AddVotingConfigExclusionsRequest("ambiguous-name"); + assertThat( + expectThrows(IllegalArgumentException.class, () -> request.resolveVotingConfigExclusions(clusterState)).getMessage(), + allOf( + containsString("node name [ambiguous-name] is ambiguous"), + containsString(node1.descriptionWithoutAttributes()), + containsString(node2.descriptionWithoutAttributes()) + ) + ); + } + public void testResolveRemoveExistingVotingConfigExclusions() { final DiscoveryNode node1 = new DiscoveryNode( "nodeName1", diff --git a/server/src/test/java/org/elasticsearch/action/ingest/SimulateExecutionServiceTests.java b/server/src/test/java/org/elasticsearch/action/ingest/SimulateExecutionServiceTests.java index f609114832a70..451ac526773da 100644 --- a/server/src/test/java/org/elasticsearch/action/ingest/SimulateExecutionServiceTests.java +++ b/server/src/test/java/org/elasticsearch/action/ingest/SimulateExecutionServiceTests.java @@ -181,6 +181,7 @@ public void testExecuteVerboseItemWithOnFailure() throws Exception { 
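
The testResolveAmbiguousName case above checks that resolving a voting-config exclusion by node name fails when two nodes share that name. A self-contained sketch of that kind of ambiguity check, using plain JDK collections and hypothetical names rather than the real DiscoveryNode API:

import java.util.List;
import java.util.stream.Collectors;

class NodeNameResolver {
    record Node(String name, String id) {}

    /** Resolve a node name to a single node id, rejecting unknown and ambiguous names. */
    static String resolveSingleNodeId(String name, List<Node> nodes) {
        List<Node> matches = nodes.stream().filter(n -> n.name().equals(name)).toList();
        if (matches.isEmpty()) {
            throw new IllegalArgumentException("node name [" + name + "] does not match any node");
        }
        if (matches.size() > 1) {
            throw new IllegalArgumentException(
                "node name [" + name + "] is ambiguous, matching " + matches.stream().map(Node::id).collect(Collectors.toList())
            );
        }
        return matches.get(0).id();
    }

    public static void main(String[] args) {
        List<Node> nodes = List.of(new Node("ambiguous-name", "nodeId1"), new Node("ambiguous-name", "nodeId2"));
        try {
            resolveSingleNodeId("ambiguous-name", nodes);
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // node name [ambiguous-name] is ambiguous, matching [nodeId1, nodeId2]
        }
    }
}
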
metadata.put(CompoundProcessor.ON_FAILURE_PROCESSOR_TYPE_FIELD, "mock"); metadata.put(CompoundProcessor.ON_FAILURE_PROCESSOR_TAG_FIELD, "processor_0"); metadata.put(CompoundProcessor.ON_FAILURE_MESSAGE_FIELD, "processor failed"); + metadata.put(CompoundProcessor.ON_FAILURE_PIPELINE_FIELD, "_id"); assertVerboseResult( simulateDocumentVerboseResult.getProcessorResults().get(1), pipeline.getId(), diff --git a/server/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java index f23aba27fde22..d0ec363567396 100644 --- a/server/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.rest.action.search.RestMultiSearchAction; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.StreamsUtils; import org.elasticsearch.test.rest.FakeRestRequest; @@ -486,6 +487,24 @@ public void testEmptyFirstLine2() throws Exception { ); } + public void testTaskDescription() { + MultiSearchRequest request = new MultiSearchRequest(); + request.add(new SearchRequest().preference("abc")); + request.add(new SearchRequest().routing("r").preference("xyz")); + request.add(new SearchRequest().indices("index-1")); + + String description = request.createTask(0, "type", "action", TaskId.EMPTY_TASK_ID, Map.of()).getDescription(); + assertThat( + description, + equalTo( + "requests[3]: " + + "indices[], search_type[QUERY_THEN_FETCH], source[], preference[abc] | " + + "indices[], search_type[QUERY_THEN_FETCH], source[], routing[r], preference[xyz] | " + + "indices[index-1], search_type[QUERY_THEN_FETCH], source[]" + ) + ); + } + private void assertExpandWildcardsValue(IndicesOptions options, String expectedValue) throws IOException { SearchRequest request = new SearchRequest(); request.indicesOptions(options); diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java index 70a57e2174865..44838951fc42c 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java @@ -276,6 +276,17 @@ public void testDescriptionIncludesScroll() { ); } + public void testDescriptionIncludePreferenceAndRouting() { + assertThat( + toDescription(new SearchRequest().preference("abc")), + equalTo("indices[], search_type[QUERY_THEN_FETCH], source[], preference[abc]") + ); + assertThat( + toDescription(new SearchRequest().preference("abc").routing("xyz")), + equalTo("indices[], search_type[QUERY_THEN_FETCH], source[], routing[xyz], preference[abc]") + ); + } + private String toDescription(SearchRequest request) { return request.createTask(0, "test", SearchAction.NAME, TaskId.EMPTY_TASK_ID, emptyMap()).getDescription(); } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceTests.java index d26d0c0c96da4..372c5254e2717 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceTests.java +++ 
b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceTests.java @@ -19,11 +19,13 @@ import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue; import org.elasticsearch.monitor.StatusInfo; import org.elasticsearch.test.EqualsHashCodeTestUtils; import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.hamcrest.Matchers; import org.junit.Before; import java.io.IOException; @@ -31,10 +33,10 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.atomic.AtomicInteger; import static org.elasticsearch.cluster.coordination.AbstractCoordinatorTestCase.Cluster.EXTREME_DELAY_VARIABILITY; import static org.elasticsearch.cluster.coordination.CoordinationDiagnosticsService.ClusterFormationStateOrException; @@ -593,82 +595,44 @@ public void testRedForNoMasterAndWithMasterEligibleNodesAndNoLeader() throws IOE } } - public void testBeginPollingClusterFormationInfo() { - /* - * This test sets up a 4-node cluster (3 master eligible). We call beginPollingClusterFormationInfo() on each node. This is allowed - * to run for a bit, and then we assert that we have cluster formation information from each master eligible node. Then we - * disconnect a random master eligible node, allow the polling to continue to run (we never cancelled it), and assert that we - * have the expected exceptions in the polling results. 
- */ - try (Cluster cluster = new Cluster(3, true, Settings.EMPTY)) { - createAndAddNonMasterNode(cluster); - cluster.runRandomly(); - cluster.stabilise(); - List masterNodes = cluster.clusterNodes.stream() - .map(Cluster.ClusterNode::getLocalNode) - .filter(DiscoveryNode::isMasterNode) - .toList(); - cluster.clusterNodes.stream().filter(node -> node.getLocalNode().isMasterNode()).forEach(node -> { - ConcurrentMap nodeToClusterFormationStateMap = new ConcurrentHashMap<>(); - node.coordinationDiagnosticsService.beginPollingClusterFormationInfo( - masterNodes, - nodeToClusterFormationStateMap::put, - cancellable -> {} - ); + public void testBeginPollingClusterFormationInfo() throws Exception { + MasterHistoryService masterHistoryService = createMasterHistoryService(); + var clusterService = mock(ClusterService.class); + when(clusterService.getSettings()).thenReturn(Settings.EMPTY); + when(clusterService.state()).thenReturn(nullMasterClusterState); + DiscoveryNode localNode = node3; + when(clusterService.localNode()).thenReturn(localNode); + Coordinator coordinator = mock(Coordinator.class); + when(coordinator.getFoundPeers()).thenReturn(List.of(node1, node2, localNode)); + DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue(); + ThreadPool threadPool = deterministicTaskQueue.getThreadPool(); - cluster.runRandomly(false, true, EXTREME_DELAY_VARIABILITY); - cluster.stabilise(); + TransportService transportService = mock(TransportService.class); + when(transportService.getThreadPool()).thenReturn(threadPool); + CoordinationDiagnosticsService coordinationDiagnosticsService = new CoordinationDiagnosticsService( + clusterService, + transportService, + coordinator, + masterHistoryService + ); - /* - * The cluster has now run normally for some period of time, so check that the outputs of - * beginPollingClusterFormationInfo() are present with no exceptions: - */ - assertThat(nodeToClusterFormationStateMap.size(), equalTo(masterNodes.size())); - masterNodes.stream().filter(masterNode -> node.getLocalNode().equals(masterNode) == false).forEach(masterNode -> { - ClusterFormationStateOrException clusterFormationStateOrException = nodeToClusterFormationStateMap.get(masterNode); - assertNotNull(clusterFormationStateOrException); - assertNotNull(clusterFormationStateOrException.clusterFormationState()); - assertNull(clusterFormationStateOrException.exception()); - ClusterFormationFailureHelper.ClusterFormationState clusterFormationState = clusterFormationStateOrException - .clusterFormationState(); - assertThat(clusterFormationState.getDescription(), not(emptyOrNullString())); - }); - - /* - * Now we disconnect a random node, simulate running the cluster for a little while, and make sure that the results of - * beginPollingClusterFormationInfo() contain the expected exceptions. 
- */ - Cluster.ClusterNode nodeToDisconnect = cluster.clusterNodes.stream() - .filter(clusterNode -> clusterNode.getLocalNode().isMasterNode()) - .findAny() - .get(); - nodeToDisconnect.disconnect(); - cluster.stabilise(); - assertThat(nodeToClusterFormationStateMap.size(), equalTo(masterNodes.size())); - AtomicInteger exceptions = new AtomicInteger(); - masterNodes.stream().filter(masterNode -> node.getLocalNode().equals(masterNode) == false).forEach(masterNode -> { - ClusterFormationStateOrException clusterFormationStateOrException = nodeToClusterFormationStateMap.get(masterNode); - assertNotNull(clusterFormationStateOrException); - if (clusterFormationStateOrException.clusterFormationState() != null) { - assertNull(clusterFormationStateOrException.exception()); - ClusterFormationFailureHelper.ClusterFormationState clusterFormationState = clusterFormationStateOrException - .clusterFormationState(); - assertThat(clusterFormationState.getDescription(), not(emptyOrNullString())); - } else { - assertNotNull(clusterFormationStateOrException.exception()); - exceptions.getAndIncrement(); - } - }); - if (node.equals(nodeToDisconnect)) { - // If this was the disconnected node, it will have encountered exceptions contacting all nodes except itself: - assertThat(exceptions.get(), equalTo(masterNodes.size() - 1)); - } else { - // Other nodes will only have encountered an exception contacting the disconnected node: - assertThat(exceptions.get(), equalTo(1)); - } - nodeToDisconnect.heal(); - }); - } + coordinationDiagnosticsService.beginPollingClusterFormationInfo(); + assertThat(coordinationDiagnosticsService.clusterFormationInfoTasks.size(), equalTo(3)); + coordinationDiagnosticsService.cancelPollingClusterFormationInfo(); + assertThat(coordinationDiagnosticsService.clusterFormationInfoTasks, Matchers.nullValue()); + coordinationDiagnosticsService.clusterChanged( + new ClusterChangedEvent(TEST_SOURCE, nullMasterClusterState, node1MasterClusterState) + ); + assertThat(coordinationDiagnosticsService.clusterFormationInfoTasks.size(), equalTo(3)); + coordinationDiagnosticsService.clusterChanged( + new ClusterChangedEvent(TEST_SOURCE, node1MasterClusterState, nullMasterClusterState) + ); + assertThat(coordinationDiagnosticsService.clusterFormationInfoTasks, Matchers.nullValue()); + /* + * Note that in this test we will never find any values in clusterFormationResponses because transportService is mocked out. + * There is not a reasonable way to plug in a transportService to this simple unit test, so testing that is left to an + * integration test. 
+ */ } public void testBeginPollingClusterFormationInfoCancel() { @@ -689,13 +653,13 @@ public void testBeginPollingClusterFormationInfoCancel() { .toList(); cluster.clusterNodes.stream().filter(node -> node.getLocalNode().isMasterNode()).forEach(node -> { ConcurrentMap nodeToClusterFormationStateMap = new ConcurrentHashMap<>(); - List cancellables = new ArrayList<>(); + Map cancellables = new ConcurrentHashMap<>(); node.coordinationDiagnosticsService.beginPollingClusterFormationInfo( masterNodes, nodeToClusterFormationStateMap::put, - cancellables::add + cancellables ); - cancellables.forEach(Scheduler.Cancellable::cancel); // This is what will most often happen in practice + cancellables.values().forEach(Scheduler.Cancellable::cancel); // This is what will most often happen in practice cluster.runRandomly(false, true, EXTREME_DELAY_VARIABILITY); cluster.stabilise(); assertThat(nodeToClusterFormationStateMap.size(), equalTo(0)); // Everything was cancelled diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java index 1dedcd06b72d6..f977a6ac0bfc8 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java @@ -2165,6 +2165,14 @@ public void assertMatched() { } } + public void testInvariantWhenTwoNodeClusterBecomesSingleNodeCluster() { + try (Cluster cluster = new Cluster(2)) { + cluster.stabilise(); + assertTrue(cluster.getAnyNodeExcept(cluster.getAnyLeader()).disconnect()); // Remove non-leader node + cluster.stabilise(); + } + } + @TestLogging( reason = "testing LagDetector and CoordinatorPublication logging", value = "org.elasticsearch.cluster.coordination.LagDetector:DEBUG," diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java index ad55c53bed5f3..e4a7bb60aca15 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java @@ -26,26 +26,61 @@ public class IndexMetadataVerifierTests extends ESTestCase { public void testArchiveBrokenIndexSettings() { IndexMetadataVerifier service = getIndexMetadataVerifier(); IndexMetadata src = newIndexMeta("foo", Settings.EMPTY); - IndexMetadata indexMetadata = service.archiveBrokenIndexSettings(src); + IndexMetadata indexMetadata = service.archiveOrDeleteBrokenIndexSettings(src); assertSame(indexMetadata, src); src = newIndexMeta("foo", Settings.builder().put("index.refresh_interval", "-200").build()); - indexMetadata = service.archiveBrokenIndexSettings(src); + indexMetadata = service.archiveOrDeleteBrokenIndexSettings(src); assertNotSame(indexMetadata, src); assertEquals("-200", indexMetadata.getSettings().get("archived.index.refresh_interval")); src = newIndexMeta("foo", Settings.builder().put("index.codec", "best_compression1").build()); - indexMetadata = service.archiveBrokenIndexSettings(src); + indexMetadata = service.archiveOrDeleteBrokenIndexSettings(src); assertNotSame(indexMetadata, src); assertEquals("best_compression1", indexMetadata.getSettings().get("archived.index.codec")); src = newIndexMeta("foo", Settings.builder().put("index.refresh.interval", "-1").build()); - indexMetadata = service.archiveBrokenIndexSettings(src); + 
indexMetadata = service.archiveOrDeleteBrokenIndexSettings(src); assertNotSame(indexMetadata, src); assertEquals("-1", indexMetadata.getSettings().get("archived.index.refresh.interval")); src = newIndexMeta("foo", indexMetadata.getSettings()); // double archive? - indexMetadata = service.archiveBrokenIndexSettings(src); + indexMetadata = service.archiveOrDeleteBrokenIndexSettings(src); + assertSame(indexMetadata, src); + } + + public void testDeleteBrokenSystemIndexSettings() { + IndexMetadataVerifier service = getIndexMetadataVerifier(); + IndexMetadata src = newSystemIndexMeta("foo", Settings.EMPTY); + IndexMetadata indexMetadata = service.archiveOrDeleteBrokenIndexSettings(src); + assertSame(indexMetadata, src); + + src = newSystemIndexMeta("foo", Settings.builder().put("index.refresh_interval", "-200").build()); + indexMetadata = service.archiveOrDeleteBrokenIndexSettings(src); + assertNotSame(indexMetadata, src); + assertNull(indexMetadata.getSettings().get("archived.index.refresh_interval")); + assertNull(indexMetadata.getSettings().get("index.refresh_interval")); + + // previously archived settings are removed + src = newSystemIndexMeta("foo", Settings.builder().put("archived.index.refresh_interval", "200").build()); + indexMetadata = service.archiveOrDeleteBrokenIndexSettings(src); + assertNotSame(indexMetadata, src); + assertNull(indexMetadata.getSettings().get("archived.index.refresh_interval")); + + src = newSystemIndexMeta("foo", Settings.builder().put("index.codec", "best_compression1").build()); + indexMetadata = service.archiveOrDeleteBrokenIndexSettings(src); + assertNotSame(indexMetadata, src); + assertNull(indexMetadata.getSettings().get("archived.index.codec")); + assertNull(indexMetadata.getSettings().get("index.codec")); + + src = newSystemIndexMeta("foo", Settings.builder().put("index.refresh.interval", "-1").build()); + indexMetadata = service.archiveOrDeleteBrokenIndexSettings(src); + assertNotSame(indexMetadata, src); + assertNull(indexMetadata.getSettings().get("archived.index.refresh.interval")); + assertNull(indexMetadata.getSettings().get("index.refresh.interval")); + + src = newSystemIndexMeta("foo", indexMetadata.getSettings()); // double archive? 
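
These assertions exercise archiveOrDeleteBrokenIndexSettings: on regular indices broken settings are preserved under an "archived." prefix, while on system indices both broken and previously archived settings are dropped. A rough, self-contained sketch of that policy over plain maps (hypothetical helper, not the IndexMetadataVerifier API):

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;

class BrokenSettingsPolicy {
    static Map<String, String> archiveOrDelete(Map<String, String> settings, Set<String> broken, boolean systemIndex) {
        Map<String, String> result = new LinkedHashMap<>();
        for (Map.Entry<String, String> entry : settings.entrySet()) {
            String key = entry.getKey();
            if (systemIndex) {
                // system indices: silently drop broken settings and anything previously archived
                if (broken.contains(key) == false && key.startsWith("archived.") == false) {
                    result.put(key, entry.getValue());
                }
            } else if (broken.contains(key)) {
                // regular indices: keep the value, but move it under the archived. prefix
                result.put("archived." + key, entry.getValue());
            } else {
                result.put(key, entry.getValue()); // valid or already archived settings are kept as-is
            }
        }
        return result;
    }

    public static void main(String[] args) {
        Map<String, String> settings = Map.of("index.refresh_interval", "-200", "index.codec", "default");
        Set<String> broken = Set.of("index.refresh_interval");
        System.out.println(archiveOrDelete(settings, broken, false)); // {archived.index.refresh_interval=-200, index.codec=default} (order may vary)
        System.out.println(archiveOrDelete(settings, broken, true));  // {index.codec=default}
    }
}
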
+ indexMetadata = service.archiveOrDeleteBrokenIndexSettings(src); assertSame(indexMetadata, src); } @@ -108,6 +143,14 @@ private IndexMetadataVerifier getIndexMetadataVerifier() { } public static IndexMetadata newIndexMeta(String name, Settings indexSettings) { + return newIndexMetaBuilder(name, indexSettings).build(); + } + + public static IndexMetadata newSystemIndexMeta(String name, Settings indexSettings) { + return newIndexMetaBuilder(name, indexSettings).system(true).build(); + } + + private static IndexMetadata.Builder newIndexMetaBuilder(String name, Settings indexSettings) { final Settings settings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, randomIndexCompatibleVersion(random())) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, between(0, 5)) @@ -120,6 +163,7 @@ public static IndexMetadata newIndexMeta(String name, Settings indexSettings) { if (randomBoolean()) { indexMetadataBuilder.state(IndexMetadata.State.CLOSE); } - return indexMetadataBuilder.build(); + return indexMetadataBuilder; } + } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java index 7bbc1d16839ab..41e5f47fb8a86 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java @@ -183,12 +183,8 @@ public void testFailedAllocation() { assertThat(unassignedPrimary.unassignedInfo().getMessage(), containsString("boom" + i)); // MaxRetryAllocationDecider#canForceAllocatePrimary should return YES decisions because canAllocate returns YES here assertEquals( - Decision.YES, - new MaxRetryAllocationDecider().canForceAllocatePrimary( - unassignedPrimary, - null, - new RoutingAllocation(null, clusterState, null, null, 0) - ) + Decision.Type.YES, + new MaxRetryAllocationDecider().canForceAllocatePrimary(unassignedPrimary, null, newRoutingAllocation(clusterState)).type() ); } // now we go and check that we are actually stick to unassigned on the next failure @@ -207,12 +203,8 @@ public void testFailedAllocation() { assertThat(unassignedPrimary.unassignedInfo().getMessage(), containsString("boom")); // MaxRetryAllocationDecider#canForceAllocatePrimary should return a NO decision because canAllocate returns NO here assertEquals( - Decision.NO, - new MaxRetryAllocationDecider().canForceAllocatePrimary( - unassignedPrimary, - null, - new RoutingAllocation(null, clusterState, null, null, 0) - ) + Decision.Type.NO, + new MaxRetryAllocationDecider().canForceAllocatePrimary(unassignedPrimary, null, newRoutingAllocation(clusterState)).type() ); } @@ -247,12 +239,12 @@ public void testFailedAllocation() { assertThat(unassignedPrimary.unassignedInfo().getMessage(), containsString("boom")); // bumped up the max retry count, so canForceAllocatePrimary should return a YES decision assertEquals( - Decision.YES, + Decision.Type.YES, new MaxRetryAllocationDecider().canForceAllocatePrimary( routingTable.index("idx").shard(0).shard(0), null, - new RoutingAllocation(null, clusterState, null, null, 0) - ) + newRoutingAllocation(clusterState) + ).type() ); // now we start the shard @@ -279,13 +271,17 @@ public void testFailedAllocation() { assertThat(unassignedPrimary.unassignedInfo().getMessage(), containsString("ZOOOMG")); // Counter reset, so MaxRetryAllocationDecider#canForceAllocatePrimary should return a 
YES decision assertEquals( - Decision.YES, - new MaxRetryAllocationDecider().canForceAllocatePrimary( - unassignedPrimary, - null, - new RoutingAllocation(null, clusterState, null, null, 0) - ) + Decision.Type.YES, + new MaxRetryAllocationDecider().canForceAllocatePrimary(unassignedPrimary, null, newRoutingAllocation(clusterState)).type() ); } + private RoutingAllocation newRoutingAllocation(ClusterState clusterState) { + final var routingAllocation = new RoutingAllocation(null, clusterState, null, null, 0); + if (randomBoolean()) { + routingAllocation.setDebugMode(randomFrom(RoutingAllocation.DebugMode.values())); + } + return routingAllocation; + } + } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorServiceTests.java index 5c17efacef956..0d063138c7d84 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorServiceTests.java @@ -73,6 +73,7 @@ import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.ACTION_MIGRATE_TIERS_AWAY_FROM_INCLUDE_DATA_LOOKUP; import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.ACTION_MIGRATE_TIERS_AWAY_FROM_REQUIRE_DATA_LOOKUP; import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.ACTION_RESTORE_FROM_SNAPSHOT; +import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.DIAGNOSIS_WAIT_FOR_OR_FIX_DELAYED_SHARDS; import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.NAME; import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorServiceTests.ShardState.AVAILABLE; import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorServiceTests.ShardState.INITIALIZING; @@ -386,7 +387,7 @@ public void testShouldBeYellowWhenRestartingReplicasReachedAllocationDelay() { List.of(ImpactArea.SEARCH) ) ), - List.of(new Diagnosis(ACTION_CHECK_ALLOCATION_EXPLAIN_API, List.of("restarting-index"))) + List.of(new Diagnosis(DIAGNOSIS_WAIT_FOR_OR_FIX_DELAYED_SHARDS, List.of("restarting-index"))) ) ) ); @@ -460,7 +461,7 @@ public void testShouldBeRedWhenRestartingPrimariesReachedAllocationDelayAndNoRep List.of(ImpactArea.INGEST, ImpactArea.SEARCH) ) ), - List.of(new Diagnosis(ACTION_CHECK_ALLOCATION_EXPLAIN_API, List.of("restarting-index"))) + List.of(new Diagnosis(DIAGNOSIS_WAIT_FOR_OR_FIX_DELAYED_SHARDS, List.of("restarting-index"))) ) ) ); diff --git a/server/src/test/java/org/elasticsearch/common/time/JavaDateMathParserTests.java b/server/src/test/java/org/elasticsearch/common/time/JavaDateMathParserTests.java index 5f273ed01ea4a..52305acc1248a 100644 --- a/server/src/test/java/org/elasticsearch/common/time/JavaDateMathParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/time/JavaDateMathParserTests.java @@ -85,6 +85,38 @@ public void testOverridingLocaleOrZoneAndCompositeRoundUpParser() { assertDateEquals(gotMillis, "297276785531", "297276785531"); } + public void testWeekBasedDate() { + DateFormatter formatter = DateFormatter.forPattern("strict_basic_week_date");// YYYY'W'wwe + // first week of 2022 is starting on Monday 3rd Jan + 
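
The week-date assertions in this test rely on ISO week numbering, under which week 1 of 2022 starts on Monday 3rd January. A standalone java.time sketch of parsing such a week date with explicit ISO week fields (this uses only the JDK, not Elasticsearch's DateFormatter, and omits the round-up-to-end-of-period behaviour the test also checks):

import java.time.LocalDate;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeFormatterBuilder;
import java.time.temporal.WeekFields;
import java.util.Locale;

class WeekDateSketch {
    public static void main(String[] args) {
        // A YYYY'W'ww + day-of-week style formatter built on ISO week fields
        // (weeks start on Monday, week 1 is the week containing the first Thursday).
        DateTimeFormatter weekDate = new DateTimeFormatterBuilder()
            .appendValue(WeekFields.ISO.weekBasedYear(), 4)
            .appendLiteral('W')
            .appendValue(WeekFields.ISO.weekOfWeekBasedYear(), 2)
            .appendValue(WeekFields.ISO.dayOfWeek(), 2)
            .toFormatter(Locale.ROOT);

        // Week 1 of 2022, day 1 of the week -> Monday 3rd January 2022.
        System.out.println(LocalDate.parse("2022W0101", weekDate)); // 2022-01-03
    }
}
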
assertDateMathEquals(formatter.toDateMathParser(), "2022W0101", "2022-01-03T23:59:59.999Z", 0, true, ZoneOffset.UTC); + + // defaulting missing day of week + formatter = DateFormatter.forPattern("YYYY'W'ww[e]");// YYYY'W'wwe + // second week of 2022 is starting on Monday 10th Jan + assertDateMathEquals(formatter.toDateMathParser(), "2022W02", "2022-01-10T23:59:59.999Z", 0, true, ZoneOffset.UTC); + } + + public void testDayOfYear() { + DateFormatter formatter = DateFormatter.forPattern("yyyy-DDD'T'HH:mm:ss.SSS"); + assertDateMathEquals(formatter.toDateMathParser(), "2022-104T14:08:30.293", "2022-04-14T14:08:30.293", 0, true, ZoneOffset.UTC); + } + + public void testAMPM() { + DateFormatter formatter = DateFormatter.forPattern("MM/dd/yyyy hh:mm a"); // h clock-hour-of-am-pm (1-12) + assertDateMathEquals(formatter.toDateMathParser(), "04/30/2020 12:48 AM", "2020-04-30T00:48:59.999Z", 0, true, ZoneOffset.UTC); + + formatter = DateFormatter.forPattern("MM/dd/yyyy KK:mm a"); // K hour-of-am-pm (0-11) + assertDateMathEquals(formatter.toDateMathParser(), "04/30/2020 00:48 AM", "2020-04-30T00:48:59.999Z", 0, true, ZoneOffset.UTC); + } + + public void testAMPMWithTimeMissing() { + DateFormatter formatter = DateFormatter.forPattern("MM/dd/yyyy[ hh:mm a]"); // h clock-hour-of-am-pm (1-12) + assertDateMathEquals(formatter.toDateMathParser(), "04/30/2020", "2020-04-30T23:59:59.999Z", 0, true, ZoneOffset.UTC); + + formatter = DateFormatter.forPattern("MM/dd/yyyy[ KK:mm a]"); // K hour-of-am-pm (0-11) + assertDateMathEquals(formatter.toDateMathParser(), "04/30/2020", "2020-04-30T23:59:59.999Z", 0, true, ZoneOffset.UTC); + } + public void testWeekDates() { DateFormatter formatter = DateFormatter.forPattern("YYYY-ww"); assertDateMathEquals(formatter.toDateMathParser(), "2016-01", "2016-01-04T23:59:59.999Z", 0, true, ZoneOffset.UTC); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java index 6c3f50c30de35..e72add443eafc 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java @@ -33,6 +33,7 @@ import java.math.BigDecimal; import java.math.BigInteger; import java.nio.charset.StandardCharsets; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; @@ -2351,6 +2352,61 @@ public void testDocumentDescriptionInTsdb() throws IOException { } } + public void testMergeSubfieldWhileBuildingMappers() throws Exception { + MapperService mapperService = createMapperService(); + /* + We had a bug (https://github.com/elastic/elasticsearch/issues/88573) building an object mapper (ObjectMapper.Builder#buildMappers). + A sub-field that already exists is merged with the existing one. As a result, the leaf field would get the wrong field path + (missing the first portion of its path). The only way to trigger this scenario for dynamic mappings is to either allow duplicate + JSON keys or ingest the same field with dots collapsed as well as expanded within the same document. Note that the two fields with + same name need to be part of the same mappings (hence the same document). If they are in two distinct mappings they are properly + merged as part of RootObjectMapper#merge. 
+ */ + ParsedDocument doc = mapperService.documentMapper().parse(source(""" + { + "foo" : { + "bar" : { + "baz" : 1 + } + }, + "foo.bar.baz" : 2 + } + """)); + Mapping mapping = doc.dynamicMappingsUpdate(); + assertNotNull(mapping); + Mapper fooMapper = mapping.getRoot().getMapper("foo"); + assertNotNull(fooMapper); + assertTrue(fooMapper instanceof ObjectMapper); + Mapper barMapper = ((ObjectMapper) fooMapper).getMapper("bar"); + assertTrue(barMapper instanceof ObjectMapper); + Mapper baz = ((ObjectMapper) barMapper).getMapper("baz"); + assertNotNull(baz); + assertEquals("foo.bar.baz", baz.name()); + assertEquals("baz", baz.simpleName()); + IndexableField[] fields = doc.rootDoc().getFields("foo.bar.baz"); + assertEquals(4, fields.length); + long[] longs = Arrays.stream(fields).mapToLong(value -> value.numericValue().longValue()).toArray(); + assertArrayEquals(new long[] { 1, 1, 2, 2 }, longs); + + // merge without going through toXContent and reparsing, otherwise the potential leaf path issue gets fixed on its own + Mapping newMapping = MapperService.mergeMappings(mapperService.documentMapper(), mapping, MapperService.MergeReason.MAPPING_UPDATE); + DocumentMapper newDocMapper = new DocumentMapper(mapperService.documentParser(), newMapping, newMapping.toCompressedXContent()); + ParsedDocument doc2 = newDocMapper.parse(source(""" + { + "foo" : { + "bar" : { + "baz" : 10 + } + } + } + """)); + assertNull(doc2.dynamicMappingsUpdate()); + IndexableField[] fields2 = doc2.rootDoc().getFields("foo.bar.baz"); + assertEquals(2, fields2.length); + long[] longs2 = Arrays.stream(fields2).mapToLong(value -> value.numericValue().longValue()).toArray(); + assertArrayEquals(new long[] { 10, 10 }, longs2); + } + /** * Mapper plugin providing a mock metadata field mapper implementation that supports setting its value */ diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java index f3771510d8da9..15a43b41880d9 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java @@ -8,8 +8,10 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.index.IndexableField; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; @@ -353,4 +355,68 @@ public void testMultiFieldChecks() throws IOException { assertFalse(mapperService.isMultiField("object.subfield1")); } + public void testMergeObjectSubfieldWhileParsing() throws IOException { + /* + If we are parsing mappings that hold the definition of the same field twice, the two are merged together. This can happen when + mappings have the same field specified using the object notation as well as the dot notation, as well as when applying index + templates, in which case the two definitions may come from separate index templates that end up in the same map (through + XContentHelper#mergeDefaults, see MetadataCreateIndexService#parseV1Mappings). + We had a bug (https://github.com/elastic/elasticsearch/issues/88573) triggered by this scenario that caused the merged leaf fields + to get the wrong path (missing the first portion). 
+ */ + MapperService mapperService = createMapperService(""" + { + "_doc": { + "properties": { + "obj": { + "properties": { + "sub": { + "properties": { + "string": { + "type": "keyword" + } + } + } + } + }, + "obj.sub.string" : { + "type" : "keyword" + } + } + } + } + """); + + assertNotNull(mapperService.mappingLookup().getMapper("obj.sub.string")); + MappedFieldType fieldType = mapperService.mappingLookup().getFieldType("obj.sub.string"); + assertNotNull(fieldType); + assertEquals(""" + { + "_doc" : { + "properties" : { + "obj" : { + "properties" : { + "sub" : { + "properties" : { + "string" : { + "type" : "keyword" + } + } + } + } + } + } + } + }""", Strings.toString(mapperService.documentMapper().mapping(), true, true)); + + // check that with the resulting mappings a new document has the previously merged field indexed properly + ParsedDocument parsedDocument = mapperService.documentMapper().parse(source(""" + { + "obj.sub.string" : "value" + }""")); + + assertNull(parsedDocument.dynamicMappingsUpdate()); + IndexableField[] fields = parsedDocument.rootDoc().getFields("obj.sub.string"); + assertEquals(2, fields.length); + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java index 3ea480b97c24e..2e0c07940a562 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java @@ -143,4 +143,67 @@ public void testBadMetadataMapper() throws IOException { ); assertEquals("[_routing] config must be an object", e.getMessage()); } + + public void testMergeSubfieldWhileParsing() throws Exception { + /* + If we are parsing mappings that hold the definition of the same field twice, the two are merged together. This can happen when + mappings have the same field specified using the object notation as well as the dot notation, as well as when applying index + templates, in which case the two definitions may come from separate index templates that end up in the same map (through + XContentHelper#mergeDefaults, see MetadataCreateIndexService#parseV1Mappings). + We had a bug (https://github.com/elastic/elasticsearch/issues/88573) triggered by this scenario that caused the merged leaf fields + to get the wrong path (missing the first portion). 
+ */ + String mappingAsString = """ + { + "_doc": { + "properties": { + "obj": { + "properties": { + "source": { + "properties": { + "geo": { + "properties": { + "location": { + "type": "geo_point" + } + } + } + } + } + } + }, + "obj.source.geo.location" : { + "type": "geo_point" + } + } + } + } + """; + Mapping mapping = createMappingParser(Settings.EMPTY).parse("_doc", new CompressedXContent(mappingAsString)); + assertEquals(1, mapping.getRoot().mappers.size()); + Mapper object = mapping.getRoot().getMapper("obj"); + assertThat(object, CoreMatchers.instanceOf(ObjectMapper.class)); + assertEquals("obj", object.simpleName()); + assertEquals("obj", object.name()); + ObjectMapper objectMapper = (ObjectMapper) object; + assertEquals(1, objectMapper.mappers.size()); + object = objectMapper.getMapper("source"); + assertThat(object, CoreMatchers.instanceOf(ObjectMapper.class)); + assertEquals("source", object.simpleName()); + assertEquals("obj.source", object.name()); + objectMapper = (ObjectMapper) object; + assertEquals(1, objectMapper.mappers.size()); + object = objectMapper.getMapper("geo"); + assertThat(object, CoreMatchers.instanceOf(ObjectMapper.class)); + assertEquals("geo", object.simpleName()); + assertEquals("obj.source.geo", object.name()); + objectMapper = (ObjectMapper) object; + assertEquals(1, objectMapper.mappers.size()); + Mapper location = objectMapper.getMapper("location"); + assertThat(location, CoreMatchers.instanceOf(GeoPointFieldMapper.class)); + GeoPointFieldMapper geoPointFieldMapper = (GeoPointFieldMapper) location; + assertEquals("obj.source.geo.location", geoPointFieldMapper.name()); + assertEquals("location", geoPointFieldMapper.simpleName()); + assertEquals("obj.source.geo.location", geoPointFieldMapper.mappedFieldType.name()); + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java index 8627cd4c16598..d762cbd3d8c28 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java @@ -17,13 +17,9 @@ import static java.util.Collections.emptyMap; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.notNullValue; public class ObjectMapperMergeTests extends ESTestCase { - private final FieldMapper barFieldMapper = createTextFieldMapper("bar"); - private final FieldMapper bazFieldMapper = createTextFieldMapper("baz"); - private final RootObjectMapper rootObjectMapper = createMapping(false, true, true, false); private RootObjectMapper createMapping( @@ -35,10 +31,13 @@ private RootObjectMapper createMapping( Map mappers = new HashMap<>(); mappers.put("disabled", createObjectMapper("disabled", disabledFieldEnabled, emptyMap())); Map fooMappers = new HashMap<>(); + MapperBuilderContext fooBuilderContext = MapperBuilderContext.ROOT.createChildContext("foo"); if (includeBarField) { + FieldMapper barFieldMapper = createTextFieldMapper("bar", fooBuilderContext); fooMappers.put("bar", barFieldMapper); } if (includeBazField) { + FieldMapper bazFieldMapper = createTextFieldMapper("baz", fooBuilderContext); fooMappers.put("baz", bazFieldMapper); } mappers.put("foo", createObjectMapper("foo", fooFieldEnabled, Collections.unmodifiableMap(fooMappers))); @@ -54,8 +53,14 @@ public void testMerge() { // THEN "baz" new field is added to merged mapping final ObjectMapper mergedFoo = (ObjectMapper) 
merged.getMapper("foo"); - assertThat(mergedFoo.getMapper("bar"), notNullValue()); - assertThat(mergedFoo.getMapper("baz"), notNullValue()); + { + Mapper bar = mergedFoo.getMapper("bar"); + assertEquals("bar", bar.simpleName()); + assertEquals("foo.bar", bar.name()); + Mapper baz = mergedFoo.getMapper("baz"); + assertEquals("baz", baz.simpleName()); + assertEquals("foo.baz", baz.name()); + } } public void testMergeWhenDisablingField() { @@ -263,8 +268,8 @@ private ObjectMapper createObjectSubobjectsFalseLeafWithMultiField() { .build(MapperBuilderContext.ROOT); } - private TextFieldMapper createTextFieldMapper(String name) { - return new TextFieldMapper.Builder(name, createDefaultIndexAnalyzers()).build(MapperBuilderContext.ROOT); + private TextFieldMapper createTextFieldMapper(String name, MapperBuilderContext mapperBuilderContext) { + return new TextFieldMapper.Builder(name, createDefaultIndexAnalyzers()).build(mapperBuilderContext); } private TextFieldMapper createTextKeywordMultiField(String name, MapperBuilderContext mapperBuilderContext) { diff --git a/server/src/test/java/org/elasticsearch/lucene/queries/BlendedTermQueryTests.java b/server/src/test/java/org/elasticsearch/lucene/queries/BlendedTermQueryTests.java index 3188aba723a7d..4be9b04b5bce6 100644 --- a/server/src/test/java/org/elasticsearch/lucene/queries/BlendedTermQueryTests.java +++ b/server/src/test/java/org/elasticsearch/lucene/queries/BlendedTermQueryTests.java @@ -248,6 +248,39 @@ public void testMinTTF() throws IOException { dir.close(); } + public void testMissingFields() throws IOException { + Directory dir = newDirectory(); + IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))); + FieldType ft = new FieldType(TextField.TYPE_NOT_STORED); + ft.freeze(); + + for (int i = 0; i < 10; i++) { + Document d = new Document(); + d.add(new TextField("id", Integer.toString(i), Field.Store.YES)); + d.add(new Field("dense", "foo", ft)); + // Add a sparse field with high totalTermFreq but low docCount + if (i % 5 == 0) { + d.add(new Field("sparse", "foo", ft)); + d.add(new Field("sparse", "one two three four five size", ft)); + } + w.addDocument(d); + } + w.commit(); + + DirectoryReader reader = DirectoryReader.open(w); + IndexSearcher searcher = setSimilarity(newSearcher(reader)); + + String[] fields = new String[] { "dense", "sparse" }; + Query query = BlendedTermQuery.dismaxBlendedQuery(toTerms(fields, "foo"), 0.1f); + TopDocs search = searcher.search(query, 10); + ScoreDoc[] scoreDocs = search.scoreDocs; + assertEquals(Integer.toString(0), reader.document(scoreDocs[0].doc).getField("id").stringValue()); + + reader.close(); + w.close(); + dir.close(); + } + public void testEqualsAndHash() { String[] fields = new String[1 + random().nextInt(10)]; for (int i = 0; i < fields.length; i++) { diff --git a/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java b/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java index 9db5bba768a3b..ff043b216e149 100644 --- a/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java @@ -56,6 +56,7 @@ public class FileSettingsServiceTests extends ESTestCase { private Environment env; private ClusterService clusterService; private FileSettingsService fileSettingsService; + private ReservedClusterStateService controller; private ThreadPool threadpool; @Before @@ -86,10 
+87,7 @@ public void setUp() throws Exception { ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - ReservedClusterStateService controller = new ReservedClusterStateService( - clusterService, - List.of(new ReservedClusterSettingsAction(clusterSettings)) - ); + controller = new ReservedClusterStateService(clusterService, List.of(new ReservedClusterSettingsAction(clusterSettings))); fileSettingsService = new FileSettingsService(clusterService, controller, env); } @@ -210,11 +208,100 @@ public void testInitialFile() throws Exception { }).when(stateService).process(any(), (XContentParser) any(), any()); service.start(); - service.startWatcher(true); + service.startWatcher(clusterService.state(), true); verify(service, times(1)).processFileSettings(any(), any()); service.stop(); service.close(); } + + @SuppressWarnings("unchecked") + public void testStopWorksInMiddleOfProcessing() throws Exception { + var spiedController = spy(controller); + var fsService = new FileSettingsService(clusterService, spiedController, env); + + FileSettingsService service = spy(fsService); + CountDownLatch processFileLatch = new CountDownLatch(1); + CountDownLatch deadThreadLatch = new CountDownLatch(1); + + doAnswer((Answer) invocation -> { + processFileLatch.countDown(); + new Thread(() -> { + // Simulate a thread that never comes back and decrements the + // countdown latch in FileSettingsService.processFileSettings + try { + deadThreadLatch.await(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + }).start(); + return null; + }).when(spiedController).process(any(String.class), any(XContentParser.class), any(Consumer.class)); + + service.start(); + assertTrue(service.watching()); + + Files.createDirectories(service.operatorSettingsDir()); + + // Make some fake settings file to cause the file settings service to process it + Files.write(service.operatorSettingsFile(), "{}".getBytes(StandardCharsets.UTF_8)); + + // we need to wait a bit, on MacOS it may take up to 10 seconds for the Java watcher service to notice the file, + // on Linux is instantaneous. Windows is instantaneous too. + processFileLatch.await(30, TimeUnit.SECONDS); + + // Stopping the service should interrupt the watcher thread, we should be able to stop + service.stop(); + assertFalse(service.watching()); + service.close(); + // let the deadlocked thread end, so we can cleanly exit the test + deadThreadLatch.countDown(); + } + + @SuppressWarnings("unchecked") + public void testStopWorksIfProcessingDidntReturnYet() throws Exception { + var spiedController = spy(controller); + var fsService = new FileSettingsService(clusterService, spiedController, env); + + FileSettingsService service = spy(fsService); + CountDownLatch processFileLatch = new CountDownLatch(1); + CountDownLatch deadThreadLatch = new CountDownLatch(1); + + doAnswer((Answer) invocation -> { + processFileLatch.countDown(); + // allow the other thread to continue, but hold on a bit to avoid + // setting the count-down latch in the main watcher loop. 
+ Thread.sleep(1_000); + new Thread(() -> { + // Simulate a thread that never comes back and decrements the + // countdown latch in FileSettingsService.processFileSettings + try { + deadThreadLatch.await(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + }).start(); + return null; + }).when(spiedController).process(any(String.class), any(XContentParser.class), any(Consumer.class)); + + service.start(); + assertTrue(service.watching()); + + Files.createDirectories(service.operatorSettingsDir()); + + // Make some fake settings file to cause the file settings service to process it + Files.write(service.operatorSettingsFile(), "{}".getBytes(StandardCharsets.UTF_8)); + + // we need to wait a bit, on MacOS it may take up to 10 seconds for the Java watcher service to notice the file, + // on Linux is instantaneous. Windows is instantaneous too. + processFileLatch.await(30, TimeUnit.SECONDS); + + // Stopping the service should interrupt the watcher thread, we should be able to stop + service.stop(); + assertFalse(service.watching()); + service.close(); + // let the deadlocked thread end, so we can cleanly exit the test + deadThreadLatch.countDown(); + } } diff --git a/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java b/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java index 478ca01f2de96..9316b65eae5e3 100644 --- a/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java @@ -41,6 +41,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; +import static org.elasticsearch.reservedstate.service.ReservedStateUpdateTask.checkMetadataVersion; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; @@ -141,7 +142,7 @@ public void testUpdateStateTasks() throws Exception { null, Collections.emptyMap(), Collections.emptySet(), - (errorState) -> {}, + (clusterState, errorState) -> {}, new ActionListener<>() { @Override public void onResponse(ActionResponse.Empty empty) {} @@ -191,12 +192,16 @@ public void onFailure(Exception failure) {} public void testErrorStateTask() throws Exception { ClusterState state = ClusterState.builder(new ClusterName("test")).build(); + final var listenerCompleted = new AtomicBoolean(false); + ReservedStateErrorTask task = spy( new ReservedStateErrorTask( new ErrorState("test", 1L, List.of("some parse error", "some io error"), ReservedStateErrorMetadata.ErrorKind.PARSING), new ActionListener<>() { @Override - public void onResponse(ActionResponse.Empty empty) {} + public void onResponse(ActionResponse.Empty empty) { + listenerCompleted.set(true); + } @Override public void onFailure(Exception e) {} @@ -241,6 +246,7 @@ public void onFailure(Exception failure) {} assertEquals(1L, (long) operatorMetadata.errorMetadata().version()); assertEquals(ReservedStateErrorMetadata.ErrorKind.PARSING, operatorMetadata.errorMetadata().errorKind()); assertThat(operatorMetadata.errorMetadata().errors(), contains("some parse error", "some io error")); + assertTrue(listenerCompleted.get()); } public void testUpdateTaskDuplicateError() { @@ -281,7 +287,7 @@ public Map fromXContent(XContentParser parser) throws IOExceptio ReservedStateHandlerMetadata hmOne = new ReservedStateHandlerMetadata("one", Set.of("a", 
"b")); ReservedStateErrorMetadata emOne = new ReservedStateErrorMetadata( - 1L, + 2L, ReservedStateErrorMetadata.ErrorKind.VALIDATION, List.of("Test error 1", "Test error 2") ); @@ -295,20 +301,20 @@ public Map fromXContent(XContentParser parser) throws IOExceptio Metadata metadata = Metadata.builder().put(operatorMetadata).build(); ClusterState state = ClusterState.builder(new ClusterName("test")).metadata(metadata).build(); + assertFalse(ReservedClusterStateService.isNewError(operatorMetadata, 2L)); assertFalse(ReservedClusterStateService.isNewError(operatorMetadata, 1L)); - assertFalse(ReservedClusterStateService.isNewError(operatorMetadata, 0L)); - assertTrue(ReservedClusterStateService.isNewError(operatorMetadata, 2L)); - assertTrue(ReservedClusterStateService.isNewError(null, 0L)); + assertTrue(ReservedClusterStateService.isNewError(operatorMetadata, 3L)); + assertTrue(ReservedClusterStateService.isNewError(null, 1L)); // We submit a task with two handler, one will cause an exception, the other will create a new state. // When we fail to update the metadata because of version, we ensure that the returned state is equal to the // original state by pointer reference to avoid cluster state update task to run. ReservedStateUpdateTask task = new ReservedStateUpdateTask( "namespace_one", - new ReservedStateChunk(Map.of("one", "two", "maker", "three"), new ReservedStateVersion(1L, Version.CURRENT)), + new ReservedStateChunk(Map.of("one", "two", "maker", "three"), new ReservedStateVersion(2L, Version.CURRENT)), Map.of(exceptionThrower.name(), exceptionThrower, newStateMaker.name(), newStateMaker), List.of(exceptionThrower.name(), newStateMaker.name()), - (errorState) -> { assertFalse(ReservedClusterStateService.isNewError(operatorMetadata, errorState.version())); }, + (clusterState, errorState) -> { assertFalse(ReservedClusterStateService.isNewError(operatorMetadata, errorState.version())); }, new ActionListener<>() { @Override public void onResponse(ActionResponse.Empty empty) {} @@ -350,20 +356,12 @@ public void onFailure(Exception e) {} public void testCheckMetadataVersion() { ReservedStateMetadata operatorMetadata = ReservedStateMetadata.builder("test").version(123L).build(); - assertTrue( - ReservedClusterStateService.checkMetadataVersion("operator", operatorMetadata, new ReservedStateVersion(124L, Version.CURRENT)) - ); + assertTrue(checkMetadataVersion("operator", operatorMetadata, new ReservedStateVersion(124L, Version.CURRENT))); - assertFalse( - ReservedClusterStateService.checkMetadataVersion("operator", operatorMetadata, new ReservedStateVersion(123L, Version.CURRENT)) - ); + assertFalse(checkMetadataVersion("operator", operatorMetadata, new ReservedStateVersion(123L, Version.CURRENT))); assertFalse( - ReservedClusterStateService.checkMetadataVersion( - "operator", - operatorMetadata, - new ReservedStateVersion(124L, Version.fromId(Version.CURRENT.id + 1)) - ) + checkMetadataVersion("operator", operatorMetadata, new ReservedStateVersion(124L, Version.fromId(Version.CURRENT.id + 1))) ); } diff --git a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java index 69632e51176f0..b62281478153e 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java @@ -13,11 +13,14 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import 
org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.io.stream.BytesStream; +import org.elasticsearch.common.io.stream.RecyclerBytesStreamOutput; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.MockPageCacheRecycler; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.RestApiVersion; @@ -28,11 +31,13 @@ import org.elasticsearch.http.HttpStats; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.rest.RestHandler.Route; +import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpNodeClient; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.tracing.Tracer; +import org.elasticsearch.transport.BytesRefRecycler; import org.elasticsearch.usage.UsageService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentBuilder; @@ -389,6 +394,30 @@ public void testDispatchRequestAddsAndFreesBytesOnlyOnceOnError() { assertEquals(0, inFlightRequestsBreaker.getUsed()); } + public void testDispatchRequestAddsAndFreesBytesOnlyOnceOnErrorDuringSend() { + int contentLength = Math.toIntExact(BREAKER_LIMIT.getBytes()); + String content = randomAlphaOfLength((int) Math.round(contentLength / inFlightRequestsBreaker.getOverhead())); + // use a real recycler that tracks leaks and create some content bytes in the test handler to check for leaks + final BytesRefRecycler recycler = new BytesRefRecycler(new MockPageCacheRecycler(Settings.EMPTY)); + restController.registerHandler( + new Route(GET, "/foo"), + (request, c, client) -> new RestToXContentListener<>(c).onResponse((b, p) -> b.startObject().endObject()) + ); + // we will produce an error in the rest handler and one more when sending the error response + RestRequest request = testRestRequest("/foo", content, XContentType.JSON); + ExceptionThrowingChannel channel = new ExceptionThrowingChannel(request, true) { + @Override + protected BytesStream newBytesOutput() { + return new RecyclerBytesStreamOutput(recycler); + } + }; + + restController.dispatchRequest(request, channel, client.threadPool().getThreadContext()); + + assertEquals(0, inFlightRequestsBreaker.getTrippedCount()); + assertEquals(0, inFlightRequestsBreaker.getUsed()); + } + public void testDispatchRequestLimitsBytes() { int contentLength = BREAKER_LIMIT.bytesAsInt() + 1; String content = randomAlphaOfLength((int) Math.round(contentLength / inFlightRequestsBreaker.getOverhead())); @@ -988,7 +1017,7 @@ boolean getSendResponseCalled() { } - private static final class ExceptionThrowingChannel extends AbstractRestChannel { + private static class ExceptionThrowingChannel extends AbstractRestChannel { protected ExceptionThrowingChannel(RestRequest request, boolean detailedErrorsEnabled) { super(request, detailedErrorsEnabled); diff --git a/server/src/test/java/org/elasticsearch/rest/action/search/RestKnnSearchActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/search/RestKnnSearchActionTests.java new file mode 100644 index 0000000000000..e57d54b8fdde9 --- /dev/null 
+++ b/server/src/test/java/org/elasticsearch/rest/action/search/RestKnnSearchActionTests.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ +package org.elasticsearch.rest.action.search; + +import org.elasticsearch.core.RestApiVersion; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.test.rest.RestActionTestCase; +import org.junit.Before; + +import java.util.Collections; +import java.util.List; +import java.util.Map; + +public class RestKnnSearchActionTests extends RestActionTestCase { + private List contentTypeHeader; + private RestKnnSearchAction action; + + @Before + public void setUpAction() { + action = new RestKnnSearchAction(); + controller().registerHandler(action); + contentTypeHeader = Collections.singletonList(randomCompatibleMediaType(RestApiVersion.V_8)); + } + + public void testDeprecation() { + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withHeaders( + Map.of("Content-Type", contentTypeHeader, "Accept", contentTypeHeader) + ).withMethod(RestRequest.Method.GET).withPath("/some_index/_knn_search").build(); + + dispatchRequest(request); + assertCriticalWarnings(RestKnnSearchAction.DEPRECATION_MESSAGE); + } +} diff --git a/server/src/test/java/org/elasticsearch/script/UpdateCtxMapTests.java b/server/src/test/java/org/elasticsearch/script/UpdateCtxMapTests.java index 1cbb67ff30f7e..cb72c90d5f593 100644 --- a/server/src/test/java/org/elasticsearch/script/UpdateCtxMapTests.java +++ b/server/src/test/java/org/elasticsearch/script/UpdateCtxMapTests.java @@ -91,7 +91,7 @@ public void testTimestamp() { IllegalArgumentException err = expectThrows(IllegalArgumentException.class, () -> meta.put("_now", 1234)); assertEquals("_now cannot be updated", err.getMessage()); assertEquals(TS, meta.get("_now")); - ZonedDateTime zdt = meta.getTimestamp(); + ZonedDateTime zdt = meta.getNow(); assertEquals(4, zdt.getMonthValue()); assertEquals(26, zdt.getDayOfMonth()); assertEquals(1992, zdt.getYear()); diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index 4448a26b5f144..18c35c374c35d 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -166,6 +166,7 @@ import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil; import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.reservedstate.service.FileSettingsService; import org.elasticsearch.script.ScriptCompiler; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchService; @@ -1932,7 +1933,8 @@ protected void assertSnapshotOrGenericThread() { new IndexMetadataVerifier(settings, namedXContentRegistry, mapperRegistry, indexScopedSettings, ScriptCompiler.NONE), shardLimitValidator, EmptySystemIndices.INSTANCE, - indicesService + indicesService, + mock(FileSettingsService.class) ); actions.put( PutMappingAction.INSTANCE, diff --git 
a/test/framework/src/main/java/org/elasticsearch/cli/CommandTestCase.java b/test/framework/src/main/java/org/elasticsearch/cli/CommandTestCase.java index 09e07f32d820a..6c3573e2594d3 100644 --- a/test/framework/src/main/java/org/elasticsearch/cli/CommandTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/cli/CommandTestCase.java @@ -41,12 +41,27 @@ public abstract class CommandTestCase extends ESTestCase { /** The ES config dir */ protected Path configDir; + /** Whether to include a whitespace in the file-system path. */ + private final boolean spaceInPath; + + protected CommandTestCase() { + this(false); + } + + protected CommandTestCase(boolean spaceInPath) { + this.spaceInPath = spaceInPath; + } + @Before public void resetTerminal() throws IOException { terminal.reset(); terminal.setSupportsBinary(false); terminal.setVerbosity(Terminal.Verbosity.NORMAL); - esHomeDir = createTempDir(); + if (spaceInPath) { + esHomeDir = createTempDir("a b"); // contains a whitespace + } else { + esHomeDir = createTempDir(); + } configDir = esHomeDir.resolve("config"); Files.createDirectory(configDir); sysprops.clear(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/TaskAssertions.java b/test/framework/src/main/java/org/elasticsearch/test/TaskAssertions.java index 80361194c4580..5f564f1338dbe 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/TaskAssertions.java +++ b/test/framework/src/main/java/org/elasticsearch/test/TaskAssertions.java @@ -16,6 +16,7 @@ import org.elasticsearch.transport.TransportService; import java.util.List; +import java.util.concurrent.TimeUnit; import static junit.framework.TestCase.assertTrue; import static junit.framework.TestCase.fail; @@ -60,7 +61,7 @@ public static void assertAllCancellableTasksAreCancelled(String actionPrefix) th } } assertTrue("found no cancellable tasks", foundTask); - }); + }, 30, TimeUnit.SECONDS); } public static void assertAllTasksHaveFinished(String actionPrefix) throws Exception { diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index 2c771350ceb4d..57acb1b9e79f0 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -37,6 +37,7 @@ import org.elasticsearch.test.tasks.MockTaskManager; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.tracing.Tracer; +import org.elasticsearch.transport.BytesTransportRequest; import org.elasticsearch.transport.ClusterConnectionManager; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.ConnectionProfile; @@ -502,10 +503,21 @@ public void sendRequest( } // poor mans request cloning... - RequestHandlerRegistry reg = MockTransportService.this.getRequestHandler(action); BytesStreamOutput bStream = new BytesStreamOutput(); request.writeTo(bStream); - final TransportRequest clonedRequest = reg.newRequest(bStream.bytes().streamInput()); + final TransportRequest clonedRequest; + if (request instanceof BytesTransportRequest) { + // Some request handlers read back a BytesTransportRequest + // into a different class that cannot be re-serialized (i.e. JOIN_VALIDATE_ACTION_NAME), + // in those cases we just copy the raw bytes back to a BytesTransportRequest. 
+ // This is only needed for the BwC for JOIN_VALIDATE_ACTION_NAME and can be removed in the next major + assert Version.CURRENT.major == Version.V_7_17_0.major + 1; + clonedRequest = new BytesTransportRequest(bStream.bytes().streamInput()); + } else { + RequestHandlerRegistry reg = MockTransportService.this.getRequestHandler(action); + clonedRequest = reg.newRequest(bStream.bytes().streamInput()); + } + assert clonedRequest.getClass().equals(request.getClass()) : clonedRequest + " vs " + request; final RunOnce runnable = new RunOnce(new AbstractRunnable() { @Override diff --git a/x-pack/docs/en/security/auditing/event-types.asciidoc b/x-pack/docs/en/security/auditing/event-types.asciidoc index b65c0fb3da31b..0e83bb9d39fcb 100644 --- a/x-pack/docs/en/security/auditing/event-types.asciidoc +++ b/x-pack/docs/en/security/auditing/event-types.asciidoc @@ -761,8 +761,8 @@ the <>. + [source,js] ---- -`{"id": , "name": , "expiration": , "role_descriptors" [], -"metadata" []}` +`{"id": , "name": , "expiration": , "role_descriptors": [], +"metadata": []}` ---- // NOTCONSOLE + diff --git a/x-pack/docs/en/security/authentication/internal-users.asciidoc b/x-pack/docs/en/security/authentication/internal-users.asciidoc index 91b5d70239495..86fc0254fed76 100644 --- a/x-pack/docs/en/security/authentication/internal-users.asciidoc +++ b/x-pack/docs/en/security/authentication/internal-users.asciidoc @@ -2,9 +2,9 @@ [[internal-users]] === Internal users -The {stack-security-features} use three _internal_ users (`_system`, `_xpack`, -and `_xpack_security`), which are responsible for the operations that take place -inside an {es} cluster. +The {stack-security-features} use four _internal_ users (`_system`, `_xpack`, +`_xpack_security`, and `_async_search`), which are responsible for the operations +that take place inside an {es} cluster. These users are only used by requests that originate from within the cluster. For this reason, they cannot be used to authenticate against the API and there diff --git a/x-pack/docs/en/security/authentication/jwt-realm.asciidoc b/x-pack/docs/en/security/authentication/jwt-realm.asciidoc index 24edf2b2a9597..c3d3f2ab4dfd3 100644 --- a/x-pack/docs/en/security/authentication/jwt-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/jwt-realm.asciidoc @@ -4,16 +4,16 @@ beta::[] -{es} can be configured to trust JSON Web Tokens (JWTs) that are issued as an -authentication credential from an external service. +{es} can be configured to trust JSON Web Tokens (JWTs) issued from an external service +as bearer tokens for authentication. When a JWT realm is used to authenticate with {es}, a distinction is made between the _client_ that is connecting to {es}, and the _user_ on whose behalf -the request should run. The JWT identifies the user, and a separate credential -is used to authenticate the client. +the request should run. The JWT authenticates the user, and a separate credential +authenticates the client. -A common scenario that uses JWTs is when an existing front-end application uses -OpenID Connect (OIDC) as an authentication method, and then accesses {es} +A common scenario for JWTs is when an existing front-end application uses +OpenID Connect (OIDC) to authenticate and identify a user, and then accesses {es} on behalf of the authenticated user. 
TIP: If the front-end application does not exist, you can use the @@ -21,22 +21,27 @@ TIP: If the front-end application does not exist, you can use the [[jwt-realm-oidc]] ==== JWT uses OIDC workflows -JWT authentication in {es} is derived from OIDC workflows, where different +JWT authentication in {es} is derived from OIDC user workflows, where different tokens can be issued by an OIDC Provider (OP). One possible token is an _ID token_, which uses the JWT format. If the ID token is presented to a JWT -realm, {es} can use it to authenticate, identify, and authorize an individual +realm, {es} can use it as a bearer token to authenticate, identify, and authorize an individual user. -NOTE: Because JWTs are external to {es}, you can define a custom workflow +NOTE: Because JWTs are obtained external to {es}, you can define a custom workflow instead of using the OIDC workflow. However, the JWT format must still be JSON Web Signature (JWS). The JWS header and JWS signature are validated using OIDC ID token validation rules. {es} supports a separate <>, which provides -stronger security guarantees than the JWT realm and is preferred for any +stronger security guarantees than the JWT realm, and is preferred for any use case where {es} can act as an OIDC RP. The OIDC realm is the only supported way to enable OIDC authentication in {kib}. +TIP: If JWTs are issued for the front-end application, the application is the realm client and JWT user. +That is not supported by OIDC flows, but it may be supported by bespoke JWT issuers. +In that case, use the client secret and JWT for the client application, and the +`es-security-runas-user` HTTP request header for the different user. See <>. + [[jwt-realm-configuration]] ==== Configure {es} to use a JWT realm @@ -124,7 +129,7 @@ The file can be removed after you load the contents into the {es} keystore. [NOTE] ==== Using the JWKS is preferred. However, you can add an HMAC key in string format -using the following command. This format is compatible with OIDC HMAC keys, but +using the following command. This format is compatible with HMAC UTF-8 keys, but only supports a single key with no attributes. You can only use one HMAC format (either `hmac_jwkset` or `hmac_key`) simultaneously. @@ -196,6 +201,10 @@ NOTE: You can relax validation of any of the time-based claims by setting validating JWTs with respect to their authentication time (`auth_time`), creation (`iat`), not before (`nbf`), and expiration times (`exp`). +`iss`:: +(Required, String) Denotes the issuer that created the ID token. The value must +be an exact, case-sensitive match to the value in the `allowed_issuer` setting. + `aud`:: (Required, String) Indicates the audiences that the ID token is for, expressed as a comma-separated value (CSV). One of the values must be an exact, case-sensitive @@ -203,23 +212,19 @@ match to any of the CSV values in the `allowed_audiences` setting. `exp`:: (Required, integer) Expiration time for the ID token, expressed in UTC -milliseconds since epoch. +seconds since epoch. `iat`:: (Required, integer) Time that the ID token was issued, expressed in UTC -milliseconds since epoch. - -`iss`:: -(Required, String) Denotes the issuer that created the ID token. The value must -be an exact, case-sensitive match to the value in the `allowed_issuer` setting. +seconds since epoch. `nbf`:: (Optional, integer) Indicates the time before which the JWT must not be accepted, -expressed as UTC milliseconds since epoch. +expressed as UTC seconds since epoch. 
`auth_time`:: (Optional, integer) Time when the user authenticated to the JWT issuer, -expressed as UTC milliseconds since epoch. +expressed as UTC seconds since epoch. [[jwt-validation-payload-es]] ====== {es} settings for consuming OIDC claims @@ -259,7 +264,7 @@ setting `claims.dn_pattern` to extract a substring value. ==== JWT realm authorization The JWT realm supports authorization with the create or update role mappings API, or delegating authorization to another realm. You cannot use these methods -simultaneously, so choose whichever works best for your environment. +simultaneously, so choose whichever works best for your environment. IMPORTANT: You cannot map roles in the JWT realm using the `role_mapping.yml` file. @@ -352,7 +357,7 @@ linked to realm `native1`. [[jwt-realm-runas]] ===== Applying the `run_as` privilege to JWT realm users -{es} can retrieve roles for a JWT user through either role mapping or +{es} can retrieve roles for a JWT user through either role mapping or delegated authorization. Regardless of which option you choose, you can apply the <> to a role so that a user can submit authenticated requests to "run as" a different user. To submit requests as @@ -415,7 +420,7 @@ the `jwt_role1` role that you mapped to this user in the JWT realm: "metadata":{"jwt_claim_email":"user2@something.example.com","jwt_claim_aud":["es01","es02","es03"], "jwt_claim_sub":"user2","jwt_claim_iss":"my-issuer"},"enabled":true,"authentication_realm": {"name":"jwt2","type":"jwt"},"lookup_realm":{"name":"jwt2","type":"jwt"},"authentication_type":"realm"} -% +% ---- If you want to specify a request as the `run_as` user, include the @@ -435,11 +440,54 @@ and {es} used the `jwt_role1` role: ---- {"username":"user123_runas","roles":["jwt_role1"],"full_name":null,"email":null,"metadata":{}, "enabled":true,"authentication_realm":{"name":"jwt2","type":"jwt"},"lookup_realm":{"name":"native", -"type":"native"},"authentication_type":"realm"}% +"type":"native"},"authentication_type":"realm"}% ---- +[[jwt-realm-jwkset-reloading]] +===== PKC JWKS reloading +JWT authentication supports signature verification using PKC (Public Key Cryptography) +or HMAC algorithms. + +PKC JSON Web Token Key Sets (JWKS) can contain public RSA and EC keys. HMAC JWKS +or an HMAC UTF-8 JWK contain secret keys. JWT issuers typically rotate PKC JWKS +more frequently (such as daily), because RSA and EC public keys are designed to +be easier to distribute than secret keys like HMAC. + +JWT realms load a PKC JWKS and an HMAC JWKS or HMAC UTF-8 JWK at startup. JWT +realms can also reload PKC JWKS contents at runtime; a reload is triggered by +signature validation failures. + +NOTE: HMAC JWKS or HMAC UTF-8 JWK reloading is not supported at this time. + +Load failures, parse errors, and configuration errors prevent a node from +starting (and restarting). However, runtime PKC reload errors and recoveries are +handled gracefully. + +All other JWT realm validations are checked before a signature failure can +trigger a PKC JWKS reload. If multiple JWT authentication signature failures +occur simultaneously with a single {es} node, reloads are combined to reduce +the reloads that are sent externally. + +Separate reload requests cannot be combined if JWT signature failures trigger: + +* PKC JWKS reloads in different {es} nodes +* PKC JWKS reloads in the same {es} node at different times + +[IMPORTANT] +==== +Enabling client authentication (`client_authentication.type`) is strongly +recommended. 
Only trusted client applications and realm-specific JWT users can +trigger PKC reload attempts. Additionally, configuring the following +<> is recommended: + +* `allowed_audiences` +* `allowed_clock_skew` +* `allowed_issuer` +* `allowed_signature_algorithms` +==== + [[hmac-oidc-example]] -==== Authorizing to the JWT realm with an OIDC HMAC key +==== Authorizing to the JWT realm with an HMAC UTF-8 key The following settings are for a JWT issuer, {es}, and a client of {es}. The example HMAC key is in an OIDC format that's compatible with HMAC. The key bytes are the UTF-8 encoding of the UNICODE characters. @@ -456,7 +504,7 @@ The following values are for the bespoke JWT issuer. Issuer: iss8 Audiences: aud8 Algorithms: HS256 -HMAC OIDC: hmac-oidc-key-string-for-hs256-algorithm +HMAC UTF-8: hmac-oidc-key-string-for-hs256-algorithm ---- // NOTCONSOLE @@ -477,7 +525,7 @@ xpack.security.authc.realms.jwt.jwt8.client_authentication.type: shared_secret realm chain on {ecloud}. ===== JWT realm secure settings -After defining the realm settings, use the +After defining the realm settings, use the {ref}/elasticsearch-keystore.html[`elasticsearch-keystore`] tool to add the following secure settings to the {es} keystore. In {ecloud}, you define settings for the {es} keystore under **Security** in your deployment. @@ -536,5 +584,5 @@ JWT realm itself. "metadata":{"jwt_claim_email":"user2@something.example.com","jwt_claim_aud":["es01","es02","es03"], "jwt_claim_sub":"user2","jwt_claim_iss":"my-issuer"},"enabled":true,"authentication_realm": {"name":"jwt2","type":"jwt"},"lookup_realm":{"name":"jwt2","type":"jwt"},"authentication_type":"realm"} -% +% ---- diff --git a/x-pack/docs/en/security/authentication/overview.asciidoc b/x-pack/docs/en/security/authentication/overview.asciidoc index 54e1e1dcbad79..96646d30b6ec0 100644 --- a/x-pack/docs/en/security/authentication/overview.asciidoc +++ b/x-pack/docs/en/security/authentication/overview.asciidoc @@ -51,6 +51,7 @@ include::kerberos-realm.asciidoc[] include::jwt-realm.asciidoc[] include::custom-realm.asciidoc[] include::anonymous-access.asciidoc[] +include::user-lookup.asciidoc[] include::user-cache.asciidoc[] include::saml-guide.asciidoc[leveloffset=+1] include::oidc-guide.asciidoc[leveloffset=+1] diff --git a/x-pack/docs/en/security/authentication/realm-chains.asciidoc b/x-pack/docs/en/security/authentication/realm-chains.asciidoc index 08da78b14442a..0a9370f2589c2 100644 --- a/x-pack/docs/en/security/authentication/realm-chains.asciidoc +++ b/x-pack/docs/en/security/authentication/realm-chains.asciidoc @@ -78,7 +78,7 @@ LDAP group assignments to determine their roles in Elasticsearch. Any realm that supports retrieving users (without needing their credentials) can be used as an _authorization realm_ (that is, its name may appear as one of the -values in the list of `authorization_realms`). See <> for +values in the list of `authorization_realms`). See <> for further explanation on which realms support this. For realms that support this feature, it can be enabled by configuring the diff --git a/x-pack/docs/en/security/authentication/security-domain.asciidoc b/x-pack/docs/en/security/authentication/security-domain.asciidoc index 2d0be7e61c32b..3fa442eed712b 100644 --- a/x-pack/docs/en/security/authentication/security-domain.asciidoc +++ b/x-pack/docs/en/security/authentication/security-domain.asciidoc @@ -22,7 +22,8 @@ Some types of resources in {es} are owned by a single user, such as <>, <>, and <>. 
When a user creates a resource, {es} captures the user's username and realm information as part of the resource's -metadata. +metadata. Likewise, if a user updates a resource, such as an API key, +{es} automatically re-captures the user's current realm information. When a user later attempts to access the resource, {es} compares the captured username and realm information against those from the accessing @@ -124,13 +125,15 @@ When adding realms to a security domain, avoid authenticating with a newly-added Removing realms from a security domain can lead to unexpected behaviors and is not recommended. -Resources created before the removal can be owned by different users depending on the resource type: +Resources created or updated before the removal can be owned by different users depending on the resource type: - <> are owned by the user for whom the profile was last <>. For users whose realms are no longer in the same domain as the owner user, a new user profile will be created for them next time the activate user profile API is called. -- Resources such as API keys are owned by the user who originally created them. +- An API key is owned by the user who originally <> or last <> it. +Users, including the original creator of the API key, will lose ownership if their realms are no longer in the same domain as those of the current API key owner. +- Resources such as async search contexts are owned by the user who originally created them. Instead of removing realms, consider disabling them and keeping them as part of the security domain. Under all circumstances, resource sharing across realms is only possible between users with the same username. diff --git a/x-pack/docs/en/security/authentication/token-authentication-services.asciidoc b/x-pack/docs/en/security/authentication/token-authentication-services.asciidoc index f7cab7e48344d..8e49ab678f087 100644 --- a/x-pack/docs/en/security/authentication/token-authentication-services.asciidoc +++ b/x-pack/docs/en/security/authentication/token-authentication-services.asciidoc @@ -33,6 +33,7 @@ curl -H "Authorization: Bearer AAEAAWVsYXN0aWMvZ...mXQtc2VydmMTpyNXdkYmRib1FTZTl include::service-accounts.asciidoc[tag=service-accounts-usage] -- +[[token-authentication-access-token]] _token-service_:: The token service uses the <> to generate access tokens and refresh tokens based on the OAuth2 specification. @@ -51,6 +52,7 @@ curl -H "Authorization: Bearer dGhpcyBpcyBub3Qx5...F0YS4gZG8gbm90IHRyeSB0byByZWF // NOTCONSOLE -- +[[token-authentication-api-key]] _api-key-service_:: The API key service uses the <> to generate API keys. diff --git a/x-pack/docs/en/security/authentication/user-lookup.asciidoc b/x-pack/docs/en/security/authentication/user-lookup.asciidoc new file mode 100644 index 0000000000000..179abd2de1e5c --- /dev/null +++ b/x-pack/docs/en/security/authentication/user-lookup.asciidoc @@ -0,0 +1,66 @@ +[role="xpack"] +[[user-lookup]] +=== Looking up users without authentication + +{es} <> exist primarily to support +<>. +Some realms authenticate users with a password (such as the +<> and <> realms), and other realms use +more complex authentication protocols (such as the <> and +<> realms). +In each case, the _primary_ purpose of the realm is to establish the identity of +the user who has made a request to the {es} API. + +However, some {es} features need to _look up_ a user without using their credentials. + +- The <> feature executes requests on behalf of + another user. 
An authenticated user with `run_as` privileges can perform + requests on behalf of another unauthenticated user. + +- The <> feature links two realms + together so that a user who authenticates against one realm can have the roles + and metadata associated with a user from a different realm. + +In each of these cases, a user must first authenticate to one realm and then +{es} will query the second realm to find another user. +The authenticated user credentials are used to authenticate in the first realm only. +The user in the second realm is retrieved by username, without needing credentials. + +When {es} resolves a user using their credentials (as performed in the first realm), +it is known as _user authentication_. + +When {es} resolves a user using the username only (as performed in the second realm), +it is known as _user lookup_. + +See the <> and <> +documentation to learn more about these features, including which realms and authentication +methods support `run_as` or delegated authorization. +In both cases, only the following realms can be used for user lookup: + +* The reserved, <> and <> realms always +support user lookup. +* The <> realm supports user lookup when the realm is configured +in <>. User lookup is not supported +when the realm is configured with `user_dn_templates`. +* User lookup support in the <> realm +requires that the realm be configured with a <> and a +bind password. + +The `pki`, `saml`, `oidc`, `kerberos` and `jwt` realms do not support user +lookup. + +NOTE: If you want to use a realm only for user lookup and prevent users from +authenticating against that realm, you can <> +and set `authentication.enabled` to `false`. + +The user lookup feature is an internal capability that is used to implement the +`run_as` and delegated authorization features - there are no APIs for user lookup. +If you wish to test your user lookup configuration, then you can do this with +`run_as`. Use the <> API, authenticate as a +`superuser` (e.g. the built-in `elastic` user) and specify the +<>. + +NOTE: The <> API and <> feature are alternative + ways to retrieve information about a {stack} user. Those APIs are not related + to the user lookup feature. + diff --git a/x-pack/docs/en/security/authorization/configuring-authorization-delegation.asciidoc b/x-pack/docs/en/security/authorization/configuring-authorization-delegation.asciidoc index eda2800dceb1b..11c3f86613500 100644 --- a/x-pack/docs/en/security/authorization/configuring-authorization-delegation.asciidoc +++ b/x-pack/docs/en/security/authorization/configuring-authorization-delegation.asciidoc @@ -4,8 +4,8 @@ In some cases, after the user has been authenticated by a realm, we may want to delegate user lookup and assignment of roles to another realm. -Any realm that supports <> (without needing the +user's credentials) can be used as an authorization realm. For example, a user that is authenticated by the Kerberos realm can be looked up in the LDAP realm.
The LDAP realm takes on responsibility for searching the user diff --git a/x-pack/docs/en/security/authorization/mapping-roles.asciidoc b/x-pack/docs/en/security/authorization/mapping-roles.asciidoc index f9c1363e5c01c..d6499bd7e6784 100644 --- a/x-pack/docs/en/security/authorization/mapping-roles.asciidoc +++ b/x-pack/docs/en/security/authorization/mapping-roles.asciidoc @@ -86,7 +86,7 @@ this is a common setting in Elasticsearch, changing its value might effect other schedules in the system. While the _role mapping APIs_ is the preferred way to manage role mappings, using -the `role_mappings.yml` file becomes useful in a couple of use cases: +the `role_mapping.yml` file becomes useful in a couple of use cases: . If you want to define fixed role mappings that no one (besides an administrator with physical access to the {es} nodes) would be able to change. @@ -96,7 +96,7 @@ need to have their roles mapped to them even when the cluster is RED. For instan an administrator that authenticates via LDAP or PKI and gets assigned an administrator role so that they can perform corrective actions. -Please note however, that the role_mappings.yml file is provided +Please note however, that the `role_mapping.yml` file is provided as a minimal administrative function and is not intended to cover and be used to define roles for all use cases. diff --git a/x-pack/docs/en/security/authorization/run-as-privilege.asciidoc b/x-pack/docs/en/security/authorization/run-as-privilege.asciidoc index 5a9fbecf92ab0..093a8814d8f02 100644 --- a/x-pack/docs/en/security/authorization/run-as-privilege.asciidoc +++ b/x-pack/docs/en/security/authorization/run-as-privilege.asciidoc @@ -30,15 +30,16 @@ support `run_as` delegation. `run_as` user:: -- -For the `run_as` user, the the following realms support delegated -`run_as` lookups by username: `native`, `file`, Active Directory, LDAP. - -NOTE: To support `run_as` in the LDAP realm, you have to run in -<>. For Active Directory, you need -to <>. - -Service tokens, the {es} Token Service, PKI, SAML 2.0, OIDC 1.0, Kerberos, JWT, -and API keys do not support delegated `run_as` lookups. +{es} supports `run_as` for any realm that supports user lookup. +Not all realms support user lookup. Refer to the list of <> +and ensure that the realm you wish to use is configured in a manner that +supports user lookup. + +The `run_as` user must be retrieved from a <> - it is not +possible to run as a +<>, +<> or +<>. -- To submit requests on behalf of other users, you need to have the `run_as` @@ -216,4 +217,4 @@ The `authentication_realm` and `lookup_realm` in the response both specify the `native` realm because both the `admin_user` and `analyst_user` are from that realm. If the two users are in different realms, the values for `authentication_realm` and `lookup_realm` are different (such as `pki` and -`native`). \ No newline at end of file +`native`). diff --git a/x-pack/docs/en/security/fips-140-compliance.asciidoc b/x-pack/docs/en/security/fips-140-compliance.asciidoc index c34e9e8e92cfd..785c720dba407 100644 --- a/x-pack/docs/en/security/fips-140-compliance.asciidoc +++ b/x-pack/docs/en/security/fips-140-compliance.asciidoc @@ -8,14 +8,16 @@ government computer security standard used to approve cryptographic modules. {es} offers a FIPS 140-2 compliant mode and as such can run in a FIPS 140-2 configured JVM. -IMPORTANT: The JVM bundled with {es} is not configured for FIPS 140-2. 
You must -either configure the bundled JVM to run with a FIPS 140-2 certified Java -Security Provider or use an external JVM configured for FIPS 140-2. +IMPORTANT: The JVM bundled with {es} is not configured for FIPS 140-2. You must +configure an external JDK with a FIPS 140-2 certified Java Security Provider. +Refer to the {es} +https://www.elastic.co/support/matrix#matrix_jvm[JVM support matrix] for +supported JVM configurations. After configuring your JVM for FIPS 140-2, you can run {es} in FIPS 140-2 mode by setting the `xpack.security.fips_mode.enabled` to `true` in `elasticsearch.yml`. -For {es}, adherence to FIPS 140-2 is ensured by +For {es}, adherence to FIPS 140-2 is ensured by: - Using FIPS approved / NIST recommended cryptographic algorithms. - Delegating the implementation of these cryptographic algorithms to a NIST diff --git a/x-pack/docs/en/security/fips-java17.asciidoc b/x-pack/docs/en/security/fips-java17.asciidoc index 0cfb8f1a7f0c6..ee1c9bf15eba0 100644 --- a/x-pack/docs/en/security/fips-java17.asciidoc +++ b/x-pack/docs/en/security/fips-java17.asciidoc @@ -5,5 +5,6 @@ If you run in FIPS 140-2 mode, you will either need to request an exception from your security organization to upgrade to {es} {version}, or remain on {es} 7.x until Java 17 is certified. ifeval::["{release-state}"=="released"] -Alternatively, consider using {ess} in the FedRAMP-certified GovCloud region. +Alternatively, consider using {ess} in the +https://www.elastic.co/industries/public-sector/fedramp[FedRAMP-certified GovCloud region]. endif::[] \ No newline at end of file diff --git a/x-pack/docs/en/security/limitations.asciidoc b/x-pack/docs/en/security/limitations.asciidoc index c2d2c8d0dcbd7..5274c450d2058 100644 --- a/x-pack/docs/en/security/limitations.asciidoc +++ b/x-pack/docs/en/security/limitations.asciidoc @@ -50,6 +50,13 @@ When a user's role enables document or <> +** <> +** <> +** <> + * The request cache is disabled for search requests if either of the following are true: ** The role query that defines document level security is <> using a <>. @@ -72,6 +79,8 @@ including the following queries: ** `percolate` query * If suggesters are specified and document level security is enabled, the specified suggesters are ignored. * A search request cannot be profiled if document level security is enabled. +* The <> does not return terms if document +level security is enabled. [discrete] [[alias-limitations]] diff --git a/x-pack/docs/en/security/troubleshooting.asciidoc b/x-pack/docs/en/security/troubleshooting.asciidoc index ed30fc6c14550..b52cb80e38ea7 100644 --- a/x-pack/docs/en/security/troubleshooting.asciidoc +++ b/x-pack/docs/en/security/troubleshooting.asciidoc @@ -107,8 +107,20 @@ The role definition might be missing or invalid. |====================== -To help track down these possibilities, add the following lines to the end of -the `log4j2.properties` configuration file in the `ES_PATH_CONF`: +To help track down these possibilities, enable additional logging to troubleshoot further. 
+You can enable debug logging by configuring the following persistent setting: + +[source, console] +---- +PUT /_cluster/settings +{ + "persistent": { + "logger.org.elasticsearch.xpack.security.authc": "debug" + } +} +---- + +Alternatively, you can add the following lines to the end of the `log4j2.properties` configuration file in the `ES_PATH_CONF`: [source,properties] ---------------- diff --git a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageIT.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageIT.java index c1e775e569138..4120607def51e 100644 --- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageIT.java +++ b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageIT.java @@ -135,6 +135,7 @@ private void testScaleFromEmptyWarm(boolean allocatable) throws Exception { refresh(); } assertThat(capacity().results().get("warm").requiredCapacity().total().storage().getBytes(), equalTo(0L)); + assertThat(capacity().results().get("warm").requiredCapacity().node().storage().getBytes(), equalTo(0L)); assertAcked( client().admin() @@ -149,6 +150,10 @@ private void testScaleFromEmptyWarm(boolean allocatable) throws Exception { } assertThat(capacity().results().get("warm").requiredCapacity().total().storage().getBytes(), Matchers.greaterThan(0L)); + assertThat( + capacity().results().get("warm").requiredCapacity().node().storage().getBytes(), + Matchers.greaterThan(ReactiveStorageDeciderService.NODE_DISK_OVERHEAD) + ); } @@ -196,7 +201,9 @@ public void testScaleFromEmptyLegacy() { refresh(indexName); assertThat(capacity().results().get("warm").requiredCapacity().total().storage().getBytes(), equalTo(0L)); + assertThat(capacity().results().get("warm").requiredCapacity().node().storage().getBytes(), equalTo(0L)); assertThat(capacity().results().get("cold").requiredCapacity().total().storage().getBytes(), equalTo(0L)); + assertThat(capacity().results().get("cold").requiredCapacity().node().storage().getBytes(), equalTo(0L)); assertAcked( client().admin() @@ -210,10 +217,19 @@ public void testScaleFromEmptyLegacy() { ); assertThat(capacity().results().get("warm").requiredCapacity().total().storage().getBytes(), Matchers.greaterThan(0L)); + assertThat( + capacity().results().get("warm").requiredCapacity().node().storage().getBytes(), + Matchers.greaterThan(ReactiveStorageDeciderService.NODE_DISK_OVERHEAD) + ); // this is not desirable, but one of the caveats of not using data tiers in the ILM policy. 
assertThat(capacity().results().get("cold").requiredCapacity().total().storage().getBytes(), Matchers.greaterThan(0L)); + assertThat( + capacity().results().get("cold").requiredCapacity().node().storage().getBytes(), + Matchers.greaterThan(ReactiveStorageDeciderService.NODE_DISK_OVERHEAD) + ); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/88842") public void testScaleWhileShrinking() throws Exception { internalCluster().startMasterOnlyNode(); final String dataNode1Name = internalCluster().startDataOnlyNode(); @@ -366,6 +382,13 @@ public void testScaleWhileShrinking() throws Exception { setTotalSpace(dataNode1Name, tooLittleSpaceForShrink + 1); assertAcked(client().admin().cluster().prepareReroute()); ensureGreen(); + + client().admin().indices().prepareDelete(indexName).get(); + response = capacity(); + assertThat( + response.results().get(policyName).requiredCapacity().total().storage(), + equalTo(response.results().get(policyName).currentCapacity().total().storage()) + ); } @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/88478") @@ -465,6 +488,13 @@ public void testScaleDuringSplitOrClone() throws Exception { setTotalSpace(dataNode1Name, requiredSpaceForClone); assertAcked(client().admin().cluster().prepareReroute()); ensureGreen(); + + client().admin().indices().prepareDelete(indexName).get(); + response = capacity(); + assertThat( + response.results().get(policyName).requiredCapacity().total().storage().getBytes(), + equalTo(requiredSpaceForClone + enoughSpace) + ); } /** diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java index 8aca4c652cca6..a2c9610f401f8 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java @@ -143,14 +143,16 @@ public AutoscalingDeciderResult scale(Settings configuration, AutoscalingDecider long unassignedBytes = unassignedBytesUnassignedShards.sizeInBytes(); long maxShardSize = allocationState.maxShardSize(); long maxNodeLockedSize = allocationState.maxNodeLockedSize(); - long minimumNodeSize = nodeSizeForDataBelowLowWatermark(Math.max(maxShardSize, maxNodeLockedSize), diskThresholdSettings) - + NODE_DISK_OVERHEAD; assert assignedBytes >= 0; assert unassignedBytes >= 0; assert maxShardSize >= 0; String message = message(unassignedBytes, assignedBytes); + long requiredTotalStorage = autoscalingCapacity.total().storage().getBytes() + unassignedBytes + assignedBytes; + long minimumNodeSize = requiredTotalStorage > 0L + ? 
nodeSizeForDataBelowLowWatermark(Math.max(maxShardSize, maxNodeLockedSize), diskThresholdSettings) + NODE_DISK_OVERHEAD + : 0L; AutoscalingCapacity requiredCapacity = AutoscalingCapacity.builder() - .total(autoscalingCapacity.total().storage().getBytes() + unassignedBytes + assignedBytes, null, null) + .total(requiredTotalStorage, null, null) .node(minimumNodeSize, null, null) .build(); return new AutoscalingDeciderResult( @@ -485,20 +487,22 @@ private long nodeLockedSize(IndexMetadata indexMetadata, Metadata metadata) { } else { Index resizeSourceIndex = indexMetadata.getResizeSourceIndex(); if (resizeSourceIndex != null) { - IndexMetadata sourceIndexMetadata = metadata.getIndexSafe(resizeSourceIndex); - // ResizeAllocationDecider only handles clone or split, do the same here. - - if (indexMetadata.getNumberOfShards() >= sourceIndexMetadata.getNumberOfShards()) { - IndexRoutingTable indexRoutingTable = state.getRoutingTable().index(resizeSourceIndex); - long max = 0; - for (int s = 0; s < sourceIndexMetadata.getNumberOfShards(); ++s) { - ShardRouting shard = indexRoutingTable.shard(s).primaryShard(); - long size = sizeOf(shard); - max = Math.max(max, size); + IndexMetadata sourceIndexMetadata = metadata.index(resizeSourceIndex); + // source indicators stay on the index even after started and also after source is deleted. + if (sourceIndexMetadata != null) { + // ResizeAllocationDecider only handles clone or split, do the same here. + if (indexMetadata.getNumberOfShards() >= sourceIndexMetadata.getNumberOfShards()) { + IndexRoutingTable indexRoutingTable = state.getRoutingTable().index(resizeSourceIndex); + long max = 0; + for (int s = 0; s < sourceIndexMetadata.getNumberOfShards(); ++s) { + ShardRouting shard = indexRoutingTable.shard(s).primaryShard(); + long size = sizeOf(shard); + max = Math.max(max, size); + } + + // 2x to account for the extra copy residing on the same node + return max * 2; } - - // 2x to account for the extra copy residing on the same node - return max * 2; } } } diff --git a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java index 35fac474d86f3..ffdd40a1bd844 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java +++ b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java @@ -236,7 +236,7 @@ public void testDataStreams() throws Exception { int initialNumberOfSuccessfulFollowedIndices = getNumberOfSuccessfulFollowedIndices(); try { // Create auto follow pattern - createAutoFollowPattern(client(), autoFollowPatternName, "logs-mysql-*", "leader_cluster"); + createAutoFollowPattern(client(), autoFollowPatternName, "logs-mysql-*", "leader_cluster", null); // Create data stream and ensure that is is auto followed try (RestClient leaderClient = buildLeaderClient()) { @@ -320,6 +320,121 @@ public void testDataStreams() throws Exception { } } + public void testDataStreamsRenameFollowDataStream() throws Exception { + if ("follow".equals(targetCluster) == false) { + return; + } + + final int numDocs = 64; + final String dataStreamName = "logs-mysql-error"; + final String dataStreamNameFollower = "logs-mysql-error_copy"; + final String autoFollowPatternName = getTestName().toLowerCase(Locale.ROOT); + + int initialNumberOfSuccessfulFollowedIndices = getNumberOfSuccessfulFollowedIndices(); + try { + // Create auto follow pattern + 
createAutoFollowPattern(client(), autoFollowPatternName, "logs-mysql-*", "leader_cluster", "{{leader_index}}_copy"); + + // Create data stream and ensure that is is auto followed + try (RestClient leaderClient = buildLeaderClient()) { + for (int i = 0; i < numDocs; i++) { + Request indexRequest = new Request("POST", "/" + dataStreamName + "/_doc"); + indexRequest.addParameter("refresh", "true"); + indexRequest.setJsonEntity("{\"@timestamp\": \"" + DATE_FORMAT.format(new Date()) + "\",\"message\":\"abc\"}"); + assertOK(leaderClient.performRequest(indexRequest)); + } + verifyDataStream(leaderClient, dataStreamName, backingIndexName(dataStreamName, 1)); + verifyDocuments(leaderClient, dataStreamName, numDocs); + } + logger.info( + "--> checking {} with index {} has been auto followed to {} with backing index {}", + dataStreamName, + backingIndexName(dataStreamName, 1), + dataStreamNameFollower, + backingIndexName(dataStreamNameFollower, 1) + ); + assertBusy(() -> { + assertThat(getNumberOfSuccessfulFollowedIndices(), equalTo(initialNumberOfSuccessfulFollowedIndices + 1)); + verifyDataStream(client(), dataStreamNameFollower, backingIndexName(dataStreamNameFollower, 1)); + ensureYellow(dataStreamNameFollower); + verifyDocuments(client(), dataStreamNameFollower, numDocs); + }); + + // First rollover and ensure second backing index is replicated: + logger.info("--> rolling over"); + try (RestClient leaderClient = buildLeaderClient()) { + Request rolloverRequest = new Request("POST", "/" + dataStreamName + "/_rollover"); + assertOK(leaderClient.performRequest(rolloverRequest)); + verifyDataStream(leaderClient, dataStreamName, backingIndexName(dataStreamName, 1), backingIndexName(dataStreamName, 2)); + + Request indexRequest = new Request("POST", "/" + dataStreamName + "/_doc"); + indexRequest.addParameter("refresh", "true"); + indexRequest.setJsonEntity("{\"@timestamp\": \"" + DATE_FORMAT.format(new Date()) + "\",\"message\":\"abc\"}"); + assertOK(leaderClient.performRequest(indexRequest)); + verifyDocuments(leaderClient, dataStreamName, numDocs + 1); + } + assertBusy(() -> { + assertThat(getNumberOfSuccessfulFollowedIndices(), equalTo(initialNumberOfSuccessfulFollowedIndices + 2)); + verifyDataStream( + client(), + dataStreamNameFollower, + backingIndexName(dataStreamNameFollower, 1), + backingIndexName(dataStreamNameFollower, 2) + ); + ensureYellow(dataStreamNameFollower); + verifyDocuments(client(), dataStreamNameFollower, numDocs + 1); + }); + + // Second rollover and ensure third backing index is replicated: + logger.info("--> rolling over"); + try (RestClient leaderClient = buildLeaderClient()) { + Request rolloverRequest = new Request("POST", "/" + dataStreamName + "/_rollover"); + assertOK(leaderClient.performRequest(rolloverRequest)); + verifyDataStream( + leaderClient, + dataStreamName, + backingIndexName(dataStreamName, 1), + backingIndexName(dataStreamName, 2), + backingIndexName(dataStreamName, 3) + ); + + Request indexRequest = new Request("POST", "/" + dataStreamName + "/_doc"); + indexRequest.addParameter("refresh", "true"); + indexRequest.setJsonEntity("{\"@timestamp\": \"" + DATE_FORMAT.format(new Date()) + "\",\"message\":\"abc\"}"); + assertOK(leaderClient.performRequest(indexRequest)); + verifyDocuments(leaderClient, dataStreamName, numDocs + 2); + } + assertBusy(() -> { + assertThat(getNumberOfSuccessfulFollowedIndices(), equalTo(initialNumberOfSuccessfulFollowedIndices + 3)); + verifyDataStream( + client(), + dataStreamNameFollower, + backingIndexName(dataStreamNameFollower, 
1), + backingIndexName(dataStreamNameFollower, 2), + backingIndexName(dataStreamNameFollower, 3) + ); + ensureYellow(dataStreamNameFollower); + verifyDocuments(client(), dataStreamNameFollower, numDocs + 2); + }); + + } finally { + cleanUpFollower( + List.of( + backingIndexName(dataStreamNameFollower, 1), + backingIndexName(dataStreamNameFollower, 2), + backingIndexName(dataStreamNameFollower, 3) + ), + List.of(dataStreamNameFollower), + List.of(autoFollowPatternName) + ); + cleanUpLeader( + List.of(backingIndexName(dataStreamName, 1), backingIndexName(dataStreamName, 2), backingIndexName(dataStreamName, 3)), + List.of(dataStreamName), + List.of() + ); + } + } + public void testDataStreams_autoFollowAfterDataStreamCreated() throws Exception { if ("follow".equals(targetCluster) == false) { return; @@ -353,7 +468,7 @@ public void testDataStreams_autoFollowAfterDataStreamCreated() throws Exception } // Create auto follow pattern - createAutoFollowPattern(client(), autoFollowPatternName, dataStreamName + "*", "leader_cluster"); + createAutoFollowPattern(client(), autoFollowPatternName, dataStreamName + "*", "leader_cluster", null); // Rollover and ensure only second backing index is replicated: try (RestClient leaderClient = buildLeaderClient()) { @@ -410,7 +525,7 @@ public void testRolloverDataStreamInFollowClusterForbidden() throws Exception { List backingIndexNames = null; try { // Create auto follow pattern - createAutoFollowPattern(client(), autoFollowPatternName, "logs-tomcat-*", "leader_cluster"); + createAutoFollowPattern(client(), autoFollowPatternName, "logs-tomcat-*", "leader_cluster", null); // Create data stream and ensure that is is auto followed try (var leaderClient = buildLeaderClient()) { @@ -531,7 +646,7 @@ public void testRolloverAliasInFollowClusterForbidden() throws Exception { int initialNumberOfSuccessfulFollowedIndices = getNumberOfSuccessfulFollowedIndices(); try { // Create auto follow pattern - createAutoFollowPattern(client(), "test_pattern", "log-*", "leader_cluster"); + createAutoFollowPattern(client(), "test_pattern", "log-*", "leader_cluster", null); // Create leader index and write alias: try (var leaderClient = buildLeaderClient()) { @@ -618,7 +733,7 @@ public void testDataStreamsBiDirectionalReplication() throws Exception { try { // Create auto follow pattern in follow cluster - createAutoFollowPattern(client(), "id1", "logs-*-eu", "leader_cluster"); + createAutoFollowPattern(client(), "id1", "logs-*-eu", "leader_cluster", null); // Create auto follow pattern in leader cluster: try (var leaderClient = buildLeaderClient()) { @@ -658,7 +773,7 @@ public void testDataStreamsBiDirectionalReplication() throws Exception { } assertOK(leaderClient.performRequest(request)); // Then create the actual auto follow pattern: - createAutoFollowPattern(leaderClient, "id2", "logs-*-na", "follower_cluster"); + createAutoFollowPattern(leaderClient, "id2", "logs-*-na", "follower_cluster", null); } var numDocs = 128; @@ -832,7 +947,7 @@ public void testAutoFollowSearchableSnapshotsFails() throws Exception { final String mountedIndex = testPrefix + "-mounted"; try { - createAutoFollowPattern(client(), autoFollowPattern, testPrefix + "-*", "leader_cluster"); + createAutoFollowPattern(client(), autoFollowPattern, testPrefix + "-*", "leader_cluster", null); // Create a regular index on leader try (var leaderClient = buildLeaderClient()) { diff --git a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java 
b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java index 38132b53ed300..db8562bac62ef 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java +++ b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java @@ -180,26 +180,6 @@ public void testFollowDataStreamFails() throws Exception { assertThat(failure.getMessage(), containsString("cannot follow [logs-syslog-prod], because it is a DATA_STREAM")); } - public void testChangeBackingIndexNameFails() throws Exception { - if ("follow".equals(targetCluster) == false) { - return; - } - - final String dataStreamName = "logs-foobar-prod"; - try (RestClient leaderClient = buildLeaderClient()) { - Request request = new Request("PUT", "/_data_stream/" + dataStreamName); - assertOK(leaderClient.performRequest(request)); - verifyDataStream(leaderClient, dataStreamName, DataStream.getDefaultBackingIndexName("logs-foobar-prod", 1)); - } - - ResponseException failure = expectThrows( - ResponseException.class, - () -> followIndex(DataStream.getDefaultBackingIndexName("logs-foobar-prod", 1), ".ds-logs-barbaz-prod-000001") - ); - assertThat(failure.getResponse().getStatusLine().getStatusCode(), equalTo(400)); - assertThat(failure.getMessage(), containsString("a backing index name in the local and remote cluster must remain the same")); - } - public void testFollowSearchableSnapshotsFails() throws Exception { final String testPrefix = getTestName().toLowerCase(Locale.ROOT); diff --git a/x-pack/plugin/ccr/qa/security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java b/x-pack/plugin/ccr/qa/security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java index 24eb234716c4e..c2210af7e0a13 100644 --- a/x-pack/plugin/ccr/qa/security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java +++ b/x-pack/plugin/ccr/qa/security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java @@ -281,7 +281,7 @@ public void testUnPromoteAndFollowDataStream() throws Exception { // Setup { - createAutoFollowPattern(adminClient(), "test_pattern", "logs-eu*", "leader_cluster"); + createAutoFollowPattern(adminClient(), "test_pattern", "logs-eu*", "leader_cluster", null); } // Create data stream and ensure that it is auto followed { diff --git a/x-pack/plugin/ccr/qa/src/main/java/org/elasticsearch/xpack/ccr/ESCCRRestTestCase.java b/x-pack/plugin/ccr/qa/src/main/java/org/elasticsearch/xpack/ccr/ESCCRRestTestCase.java index f7df63db15f97..b95d9f60c62d9 100644 --- a/x-pack/plugin/ccr/qa/src/main/java/org/elasticsearch/xpack/ccr/ESCCRRestTestCase.java +++ b/x-pack/plugin/ccr/qa/src/main/java/org/elasticsearch/xpack/ccr/ESCCRRestTestCase.java @@ -335,7 +335,13 @@ protected static List verifyDataStream(final RestClient client, final St return List.copyOf(actualBackingIndices); } - protected static void createAutoFollowPattern(RestClient client, String name, String pattern, String remoteCluster) throws IOException { + protected static void createAutoFollowPattern( + RestClient client, + String name, + String pattern, + String remoteCluster, + String followIndexPattern + ) throws IOException { Request request = new Request("PUT", "/_ccr/auto_follow/" + name); try (XContentBuilder bodyBuilder = JsonXContent.contentBuilder()) { bodyBuilder.startObject(); @@ -345,6 +351,9 @@ protected static void createAutoFollowPattern(RestClient client, String name, St bodyBuilder.value(pattern); } 
bodyBuilder.endArray(); + if (followIndexPattern != null) { + bodyBuilder.field("follow_index_pattern", followIndexPattern); + } bodyBuilder.field("remote_cluster", remoteCluster); } bodyBuilder.endObject(); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java index a53ea9dc69039..b11fafd01f6b9 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java @@ -19,6 +19,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; @@ -61,6 +62,8 @@ import java.util.function.Function; import java.util.function.LongSupplier; import java.util.function.Supplier; +import java.util.regex.Matcher; +import java.util.regex.Pattern; import java.util.stream.Collectors; import static org.elasticsearch.core.Strings.format; @@ -72,9 +75,24 @@ */ public class AutoFollowCoordinator extends AbstractLifecycleComponent implements ClusterStateListener { + /** + * This is the string that will be replaced by the leader index name for a backing index or data + * stream. It allows auto-following to automatically rename an index or data stream when + * automatically followed. For example, using "{{leader_index}}_copy" for the follow pattern + * means that a data stream called "logs-foo-bar" would be renamed "logs-foo-bar_copy" when + * replicated, and a backing index called ".ds-logs-foo-bar-2022-02-02-000001" would be renamed + * to ".ds-logs-foo-bar_copy-2022-02-02-000001". + * See {@link AutoFollower#getFollowerIndexName} for the entire usage. + */ + public static final String AUTO_FOLLOW_PATTERN_REPLACEMENT = "{{leader_index}}"; + private static final Logger LOGGER = LogManager.getLogger(AutoFollowCoordinator.class); private static final int MAX_AUTO_FOLLOW_ERRORS = 256; + private static final Pattern DS_BACKING_PATTERN = Pattern.compile( + "^(.*?" + DataStream.BACKING_INDEX_PREFIX + ")(.+)-(\\d{4}.\\d{2}.\\d{2})(-[\\d]+)?$" + ); + private final Client client; private final ClusterService clusterService; private final CcrLicenseChecker ccrLicenseChecker; @@ -563,6 +581,12 @@ private void autoFollowIndices( cleanFollowedRemoteIndices(remoteClusterState, patterns); } + /** + * Go through all the leader indices that need to be followed, ensuring that they are + * auto-followed by only a single pattern, have soft-deletes enabled, are not + * searchable snapshots, and are not already followed. If all of those conditions are met, + * then follow the indices. 
+ */ private void checkAutoFollowPattern( String autoFollowPattenName, String remoteClusterString, @@ -582,8 +606,13 @@ private void checkAutoFollowPattern( leaderIndicesToFollow.size() ); + // Loop through all the as-of-yet-unfollowed indices from the leader for (final Index indexToFollow : leaderIndicesToFollow) { + // Look up the abstraction for the given index, e.g., an index ".ds-foo" could look + // up the Data Stream "foo" IndexAbstraction indexAbstraction = remoteMetadata.getIndicesLookup().get(indexToFollow.getName()); + // Ensure that the remote cluster doesn't have other patterns + // that would follow the index, there can be only one. List otherMatchingPatterns = patternsForTheSameRemoteCluster.stream() .filter(otherPattern -> otherPattern.v2().match(indexAbstraction)) .map(Tuple::v1) @@ -605,6 +634,7 @@ private void checkAutoFollowPattern( ); } else { final IndexMetadata leaderIndexMetadata = remoteMetadata.getIndexSafe(indexToFollow); + // First ensure that the index on the leader that we want to follow has soft-deletes enabled if (IndexSettings.INDEX_SOFT_DELETES_SETTING.get(leaderIndexMetadata.getSettings()) == false) { String message = String.format( Locale.ROOT, @@ -639,10 +669,12 @@ private void checkAutoFollowPattern( error -> groupedListener.onResponse(new Tuple<>(indexToFollow, error)) ); } else { + // Finally, if there are no reasons why we cannot follow the leader index, perform the follow. followLeaderIndex( autoFollowPattenName, remoteClusterString, indexToFollow, + indexAbstraction, autoFollowPattern, headers, error -> groupedListener.onResponse(new Tuple<>(indexToFollow, error)) @@ -669,22 +701,32 @@ private static boolean leaderIndexAlreadyFollowed(AutoFollowPattern autoFollowPa return false; } - private void followLeaderIndex( - String autoFollowPattenName, - String remoteClusterString, + /** + * Given a remote cluster, index that will be followed (and its abstraction), as well as an + * {@link AutoFollowPattern}, generate the internal follow request for following the index. + */ + static PutFollowAction.Request generateRequest( + String remoteCluster, Index indexToFollow, - AutoFollowPattern pattern, - Map headers, - Consumer onResult + IndexAbstraction indexAbstraction, + AutoFollowPattern pattern ) { final String leaderIndexName = indexToFollow.getName(); final String followIndexName = getFollowerIndexName(pattern, leaderIndexName); PutFollowAction.Request request = new PutFollowAction.Request(); - request.setRemoteCluster(remoteClusterString); + request.setRemoteCluster(remoteCluster); request.setLeaderIndex(indexToFollow.getName()); request.setFollowerIndex(followIndexName); request.setSettings(pattern.getSettings()); + // If there was a pattern specified for renaming the backing index, and this index is + // part of a data stream, then send the new data stream name as part of the request. 
+ if (pattern.getFollowIndexPattern() != null && indexAbstraction.getParentDataStream() != null) { + String dataStreamName = indexAbstraction.getParentDataStream().getDataStream().getName(); + // Send the follow index pattern as the data stream pattern, so that data streams can be + // renamed accordingly (not only the backing indices) + request.setDataStreamName(pattern.getFollowIndexPattern().replace(AUTO_FOLLOW_PATTERN_REPLACEMENT, dataStreamName)); + } request.getParameters().setMaxReadRequestOperationCount(pattern.getMaxReadRequestOperationCount()); request.getParameters().setMaxReadRequestSize(pattern.getMaxReadRequestSize()); request.getParameters().setMaxOutstandingReadRequests(pattern.getMaxOutstandingReadRequests()); @@ -697,9 +739,23 @@ private void followLeaderIndex( request.getParameters().setReadPollTimeout(pattern.getReadPollTimeout()); request.masterNodeTimeout(TimeValue.MAX_VALUE); + return request; + } + + private void followLeaderIndex( + String autoFollowPattenName, + String remoteClusterString, + Index indexToFollow, + IndexAbstraction indexAbstraction, + AutoFollowPattern pattern, + Map headers, + Consumer onResult + ) { + PutFollowAction.Request request = generateRequest(remoteClusterString, indexToFollow, indexAbstraction, pattern); + // Execute if the create and follow api call succeeds: Runnable successHandler = () -> { - LOGGER.info("auto followed leader index [{}] as follow index [{}]", indexToFollow, followIndexName); + LOGGER.info("auto followed leader index [{}] as follow index [{}]", indexToFollow, request.getFollowerIndex()); // This function updates the auto follow metadata in the cluster to record that the leader index has been followed: // (so that we do not try to follow it in subsequent auto follow runs) @@ -731,6 +787,22 @@ private void finalise(int slot, AutoFollowResult result, final Thread thread) { } } + /** + * Given an auto following pattern for a set of indices and the cluster state from a remote + * cluster, return the list of indices that need to be followed. The list of followed index + * UUIDs contains indices that have already been followed, so the returned list will only + * contain "new" indices from the leader that need to be followed. + * + * When looking up the name of the index to see if it matches one of the patterns, the index + * abstraction ({@link IndexAbstraction}) of the index is used for comparison, this means + * that if an index named ".ds-foo" was part of a data stream "foo", then an auto-follow + * pattern of "f*" would allow the ".ds-foo" index to be returned. + * + * @param autoFollowPattern pattern to check indices that may need to be followed + * @param remoteClusterState state from the remote ES cluster + * @param followedIndexUUIDs a collection of UUIDs of indices already being followed + * @return any new indices on the leader that need to be followed + */ static List getLeaderIndicesToFollow( AutoFollowPattern autoFollowPattern, ClusterState remoteClusterState, @@ -760,9 +832,45 @@ static List getLeaderIndicesToFollow( return leaderIndicesToFollow; } + /** + * Returns the new name for the follower index. If the auto-follow configuration includes a + * follow index pattern, the text "{@code {{leader_index}}}" is replaced with the original + * index name, so a leader index called "foo" and a pattern of "{{leader_index}}_copy" + * becomes a new follower index called "foo_copy". 
+ */ static String getFollowerIndexName(AutoFollowPattern autoFollowPattern, String leaderIndexName) { - if (autoFollowPattern.getFollowIndexPattern() != null) { - return autoFollowPattern.getFollowIndexPattern().replace("{{leader_index}}", leaderIndexName); + final String followPattern = autoFollowPattern.getFollowIndexPattern(); + if (followPattern != null) { + if (leaderIndexName.contains(DataStream.BACKING_INDEX_PREFIX)) { + // The index being replicated is a data stream backing index, so it's something + // like: .ds-<data-stream-name>-20XX-mm-dd-NNNNNN + // + // However, we cannot just replace the name with the proposed follow index + // pattern, or else we'll end up with something like ".ds-logs-foo-bar-2022-02-02-000001_copy" + // for "{{leader_index}}_copy", which will cause problems because it doesn't + // follow a parseable pattern. Instead it would be better to rename it as though + // the data stream name was the leader index name, ending up with + // ".ds-logs-foo-bar_copy-2022-02-02-000001" as the final index name. + Matcher m = DS_BACKING_PATTERN.matcher(leaderIndexName); + if (m.find()) { + return m.group(1) + // Prefix including ".ds-" + followPattern.replace(AUTO_FOLLOW_PATTERN_REPLACEMENT, m.group(2)) + // Data stream name changed + "-" + // Hyphen separator + m.group(3) + // Date math + m.group(4); + } else { + throw new IllegalArgumentException( + "unable to determine follower index name from leader index name [" + + leaderIndexName + + "] and follow index pattern: [" + + followPattern + + "], index appears to follow a regular data stream backing pattern, but could not be parsed" + ); + } + } else { + // If the index does not contain a `.ds-`, then rename it as usual. + return followPattern.replace("{{leader_index}}", leaderIndexName); + } } else { return leaderIndexName; } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java index 88301c49c2101..b95e03eb09f58 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java @@ -25,6 +25,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; @@ -169,17 +170,6 @@ private void createFollowerIndex( return; } - if (remoteDataStream != null) { - // when following a backing index then the names of the backing index must be remain the same in the local - // and remote cluster. 
- if (request.getLeaderIndex().equals(request.getFollowerIndex()) == false) { - listener.onFailure( - new IllegalArgumentException("a backing index name in the local and remote cluster must remain the same") - ); - return; - } - } - final Settings overrideSettings = Settings.builder() .put(IndexMetadata.SETTING_INDEX_PROVIDED_NAME, request.getFollowerIndex()) .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true) @@ -215,15 +205,37 @@ protected void doRun() { (delegatedListener, response) -> afterRestoreStarted(clientWithHeaders, request, delegatedListener, response) ); if (remoteDataStream == null) { + // If the index we're following is not part of a data stream, start the + // restoration of the index normally. restoreService.restoreSnapshot(restoreRequest, delegatelistener); } else { String followerIndexName = request.getFollowerIndex(); + // This method is used to update the metadata in the same cluster state + // update as the snapshot is restored. BiConsumer updater = (currentState, mdBuilder) -> { - DataStream localDataStream = mdBuilder.dataStreamMetadata().dataStreams().get(remoteDataStream.getName()); - Index followerIndex = mdBuilder.get(followerIndexName).getIndex(); - assert followerIndex != null; + final String localDataStreamName; + + // If we have been given a data stream name, use that name for the local + // data stream. See the javadoc for AUTO_FOLLOW_PATTERN_REPLACEMENT + // for more info. + final String dsName = request.getDataStreamName(); + if (Strings.hasText(dsName)) { + localDataStreamName = dsName; + } else { + // There was no specified name, use the original data stream name. + localDataStreamName = remoteDataStream.getName(); + } + final DataStream localDataStream = mdBuilder.dataStreamMetadata().dataStreams().get(localDataStreamName); + final Index followerIndex = mdBuilder.get(followerIndexName).getIndex(); + assert followerIndex != null + : "expected followerIndex " + followerIndexName + " to exist in the state, but it did not"; - DataStream updatedDataStream = updateLocalDataStream(followerIndex, localDataStream, remoteDataStream); + final DataStream updatedDataStream = updateLocalDataStream( + followerIndex, + localDataStream, + localDataStreamName, + remoteDataStream + ); mdBuilder.put(updatedDataStream); }; restoreService.restoreSnapshot(restoreRequest, delegatelistener, updater); @@ -303,12 +315,23 @@ private void initiateFollowing( ); } - static DataStream updateLocalDataStream(Index backingIndexToFollow, DataStream localDataStream, DataStream remoteDataStream) { + /** + * Given the backing index that the follower is going to follow, the local data stream (if it + * exists) and the remote data stream, return the new local data stream for the local cluster + * (the follower) updated with whichever information is necessary to restore the new + * soon-to-be-followed index. + */ + static DataStream updateLocalDataStream( + Index backingIndexToFollow, + DataStream localDataStream, + String localDataStreamName, + DataStream remoteDataStream + ) { if (localDataStream == null) { // The data stream and the backing indices have been created and validated in the remote cluster, // just copying the data stream is in this case safe. 
return new DataStream( - remoteDataStream.getName(), + localDataStreamName, List.of(backingIndexToFollow), remoteDataStream.getGeneration(), remoteDataStream.getMetadata(), diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java index 825c2abeb95ac..f8cca99ce5e8e 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.DataStreamTestHelper; +import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.routing.IndexRoutingTable; @@ -32,6 +33,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; @@ -74,6 +76,7 @@ import static org.elasticsearch.xpack.ccr.action.AutoFollowCoordinator.AutoFollower.recordLeaderIndexAsFollowFunction; import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -1001,6 +1004,331 @@ public void testGetFollowerIndexName() { null ); assertThat(AutoFollower.getFollowerIndexName(autoFollowPattern, "metrics-0"), equalTo("eu-metrics-0")); + + // Test that index of data stream type name works correctly: + autoFollowPattern = new AutoFollowPattern( + "remote", + List.of("logs-*"), + List.of(), + "{{leader_index}}_copy", + Settings.EMPTY, + true, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ); + assertThat( + AutoFollower.getFollowerIndexName(autoFollowPattern, ".ds-logs-foo-bar-2022-02-01-123456"), + equalTo(".ds-logs-foo-bar_copy-2022-02-01-123456") + ); + + autoFollowPattern = new AutoFollowPattern( + "remote", + List.of("logs-*"), + List.of(), + "prepend_{{leader_index}}", + Settings.EMPTY, + true, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ); + assertThat( + AutoFollower.getFollowerIndexName(autoFollowPattern, ".ds-logs-foo-bar-2022-02-01-123456"), + equalTo(".ds-prepend_logs-foo-bar-2022-02-01-123456") + ); + + } + + public void testGenerateRequest() { + // Renaming with a suffix and normal pattern backing indices + { + AutoFollowPattern pattern = new AutoFollowPattern( + "remote", + List.of("logs-*"), + List.of(), + "{{leader_index}}_copy", + Settings.EMPTY, + true, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ); + + Index index = new Index(".ds-logs-foo-bar-2022-02-01-123456", "uuid"); + IndexAbstraction indexAbstraction = new IndexAbstraction.ConcreteIndex( + IndexMetadata.builder(index.getName()) + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_VERSION_CREATED, 
Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + .build() + ) + .build(), + new IndexAbstraction.DataStream( + new DataStream("logs-foo-bar", List.of(index), 1, Map.of(), false, false, false, true, IndexMode.STANDARD) + ) + ); + + PutFollowAction.Request request = AutoFollower.generateRequest("remote", index, indexAbstraction, pattern); + assertThat(request.getRemoteCluster(), equalTo("remote")); + assertThat(request.getFollowerIndex(), equalTo(".ds-logs-foo-bar_copy-2022-02-01-123456")); + assertThat(request.getLeaderIndex(), equalTo(".ds-logs-foo-bar-2022-02-01-123456")); + assertThat(request.getDataStreamName(), equalTo("logs-foo-bar_copy")); + } + + // Renaming with a prefix and normal pattern backing indices + { + AutoFollowPattern pattern = new AutoFollowPattern( + "remote", + List.of("logs-*"), + List.of(), + "copy_{{leader_index}}", + Settings.EMPTY, + true, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ); + + Index index = new Index(".ds-logs-foo-bar-2022-02-01-123456", "uuid"); + IndexAbstraction indexAbstraction = new IndexAbstraction.ConcreteIndex( + IndexMetadata.builder(index.getName()) + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + .build() + ) + .build(), + new IndexAbstraction.DataStream( + new DataStream("logs-foo-bar", List.of(index), 1, Map.of(), false, false, false, true, IndexMode.STANDARD) + ) + ); + + PutFollowAction.Request request = AutoFollower.generateRequest("remote", index, indexAbstraction, pattern); + assertThat(request.getRemoteCluster(), equalTo("remote")); + assertThat(request.getFollowerIndex(), equalTo(".ds-copy_logs-foo-bar-2022-02-01-123456")); + assertThat(request.getLeaderIndex(), equalTo(".ds-logs-foo-bar-2022-02-01-123456")); + assertThat(request.getDataStreamName(), equalTo("copy_logs-foo-bar")); + } + + // Renaming with a suffix and irregular pattern backing indices + { + AutoFollowPattern pattern = new AutoFollowPattern( + "remote", + List.of("logs-*"), + List.of(), + "{{leader_index}}_copy", + Settings.EMPTY, + true, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ); + + Index index = new Index("my-backing-index", "uuid"); + IndexAbstraction indexAbstraction = new IndexAbstraction.ConcreteIndex( + IndexMetadata.builder(index.getName()) + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + .build() + ) + .build(), + new IndexAbstraction.DataStream( + new DataStream("logs-foo-bar", List.of(index), 1, Map.of(), false, false, false, true, IndexMode.STANDARD) + ) + ); + + PutFollowAction.Request request = AutoFollower.generateRequest("remote", index, indexAbstraction, pattern); + assertThat(request.getRemoteCluster(), equalTo("remote")); + assertThat(request.getFollowerIndex(), equalTo("my-backing-index_copy")); + assertThat(request.getLeaderIndex(), equalTo("my-backing-index")); + assertThat(request.getDataStreamName(), equalTo("logs-foo-bar_copy")); + } + + // Renaming with a suffix but not part of a data stream + { + AutoFollowPattern pattern = new AutoFollowPattern( + "remote", + List.of("logs-*"), + List.of(), + "{{leader_index}}_copy", + 
Settings.EMPTY, + true, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ); + + Index index = new Index(".ds-logs-foo-bar-2022-02-01-123456", "uuid"); + IndexAbstraction indexAbstraction = new IndexAbstraction.ConcreteIndex( + IndexMetadata.builder(index.getName()) + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + .build() + ) + .build(), + null + ); + + PutFollowAction.Request request = AutoFollower.generateRequest("remote", index, indexAbstraction, pattern); + assertThat(request.getRemoteCluster(), equalTo("remote")); + assertThat(request.getFollowerIndex(), equalTo(".ds-logs-foo-bar_copy-2022-02-01-123456")); + assertThat(request.getLeaderIndex(), equalTo(".ds-logs-foo-bar-2022-02-01-123456")); + assertThat(request.getDataStreamName(), equalTo(null)); + } + + // Regular backing index, but no renaming + { + AutoFollowPattern pattern = new AutoFollowPattern( + "remote", + List.of("logs-*"), + List.of(), + null, + Settings.EMPTY, + true, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ); + + Index index = new Index(".ds-logs-foo-bar-2022-02-01-123456", "uuid"); + IndexAbstraction indexAbstraction = new IndexAbstraction.ConcreteIndex( + IndexMetadata.builder(index.getName()) + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + .build() + ) + .build(), + new IndexAbstraction.DataStream( + new DataStream("logs-foo-bar", List.of(index), 1, Map.of(), false, false, false, true, IndexMode.STANDARD) + ) + ); + + PutFollowAction.Request request = AutoFollower.generateRequest("remote", index, indexAbstraction, pattern); + assertThat(request.getRemoteCluster(), equalTo("remote")); + assertThat(request.getFollowerIndex(), equalTo(".ds-logs-foo-bar-2022-02-01-123456")); + assertThat(request.getLeaderIndex(), equalTo(".ds-logs-foo-bar-2022-02-01-123456")); + assertThat(request.getDataStreamName(), equalTo(null)); + } + + // Renaming with a suffix and just the worst named backing indices + { + AutoFollowPattern pattern = new AutoFollowPattern( + "remote", + List.of("logs-*"), + List.of(), + "{{leader_index}}_copy", + Settings.EMPTY, + true, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ); + + Index index = new Index("my-.ds-backing-index", "uuid"); + IndexAbstraction indexAbstraction = new IndexAbstraction.ConcreteIndex( + IndexMetadata.builder(index.getName()) + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + .build() + ) + .build(), + new IndexAbstraction.DataStream( + new DataStream("logs-foo-bar", List.of(index), 1, Map.of(), false, false, false, true, IndexMode.STANDARD) + ) + ); + + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> AutoFollower.generateRequest("remote", index, indexAbstraction, pattern) + ); + assertThat( + e.getMessage(), + containsString( + "unable to determine follower index name from leader index name " + + "[my-.ds-backing-index] 
and follow index pattern: [{{leader_index}}_copy]" + + ", index appears to follow a regular data stream backing pattern, but could not be parsed" + ) + ); + } } public void testStats() { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowParametersTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowParametersTests.java index fd92bc3ecff99..93879f2dfb842 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowParametersTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowParametersTests.java @@ -38,6 +38,11 @@ protected Writeable.Reader instanceReader() { return FollowParameters::new; } + @Override + protected FollowParameters mutateInstance(FollowParameters instance) { + return randomInstance(); + } + static FollowParameters randomInstance() { FollowParameters followParameters = new FollowParameters(); followParameters.setMaxOutstandingReadRequests(randomIntBetween(0, Integer.MAX_VALUE)); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutFollowActionRequestTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutFollowActionRequestTests.java index 50fe5ce87182e..ab84ca9fd9ca7 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutFollowActionRequestTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutFollowActionRequestTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.ccr.action.PutFollowAction; @@ -38,6 +39,7 @@ protected PutFollowAction.Request createTestInstance() { Settings.builder().put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), randomIntBetween(0, 4)).build() ); ResumeFollowActionRequestTests.generateFollowParameters(request.getParameters()); + request.setDataStreamName(randomAlphaOfLength(4)); return request; } @@ -53,6 +55,7 @@ protected PutFollowAction.Request createXContextTestInstance(XContentType xConte ); request.setFollowerIndex("followerIndex"); ResumeFollowActionRequestTests.generateFollowParameters(request.getParameters()); + request.setDataStreamName(randomAlphaOfLength(4)); return request; } @@ -61,6 +64,40 @@ protected PutFollowAction.Request doParseInstance(XContentParser parser) throws return PutFollowAction.Request.fromXContent(parser, "followerIndex", ActiveShardCount.DEFAULT); } + @Override + protected PutFollowAction.Request mutateInstance(PutFollowAction.Request instance) throws IOException { + PutFollowAction.Request request = new PutFollowAction.Request(); + request.setFollowerIndex(instance.getFollowerIndex()); + request.waitForActiveShards(instance.waitForActiveShards()); + request.setRemoteCluster(instance.getRemoteCluster()); + request.setLeaderIndex(instance.getLeaderIndex()); + request.setSettings(instance.getSettings()); + request.setParameters(instance.getParameters()); + request.setDataStreamName(instance.getDataStreamName()); + + switch (randomIntBetween(0, 6)) { + case 0 -> request.setFollowerIndex(randomAlphaOfLength(5)); + case 1 -> request.waitForActiveShards(new ActiveShardCount(randomIntBetween(3, 5))); + case 2 -> request.setRemoteCluster(randomAlphaOfLength(5)); + 
case 3 -> request.setLeaderIndex(randomAlphaOfLength(5)); + case 4 -> request.setSettings( + Settings.builder() + .put( + IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), + randomValueOtherThan( + IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(request.getSettings()), + ESTestCase::randomInt + ) + ) + .build() + ); + case 5 -> request.setParameters(FollowParametersTests.randomInstance()); + case 6 -> request.setDataStreamName(randomAlphaOfLength(5)); + default -> throw new AssertionError("failed branch"); + } + return request; + } + @Override protected boolean supportsUnknownFields() { return false; diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowActionTests.java index 955623bdda743..61050b4172119 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowActionTests.java @@ -24,7 +24,12 @@ public class TransportPutFollowActionTests extends ESTestCase { public void testCreateNewLocalDataStream() { DataStream remoteDataStream = generateDataSteam("logs-foobar", 3, false); Index backingIndexToFollow = remoteDataStream.getIndices().get(remoteDataStream.getIndices().size() - 1); - DataStream result = TransportPutFollowAction.updateLocalDataStream(backingIndexToFollow, null, remoteDataStream); + DataStream result = TransportPutFollowAction.updateLocalDataStream( + backingIndexToFollow, + null, + remoteDataStream.getName(), + remoteDataStream + ); assertThat(result.getName(), equalTo(remoteDataStream.getName())); assertThat(result.getTimeStampField(), equalTo(remoteDataStream.getTimeStampField())); assertThat(result.getGeneration(), equalTo(remoteDataStream.getGeneration())); @@ -36,7 +41,12 @@ public void testUpdateLocalDataStream_followNewBackingIndex() { DataStream remoteDataStream = generateDataSteam("logs-foobar", 3, false); DataStream localDataStream = generateDataSteam("logs-foobar", 2, true); Index backingIndexToFollow = remoteDataStream.getIndices().get(remoteDataStream.getIndices().size() - 1); - DataStream result = TransportPutFollowAction.updateLocalDataStream(backingIndexToFollow, localDataStream, remoteDataStream); + DataStream result = TransportPutFollowAction.updateLocalDataStream( + backingIndexToFollow, + localDataStream, + remoteDataStream.getName(), + remoteDataStream + ); assertThat(result.getName(), equalTo(remoteDataStream.getName())); assertThat(result.getTimeStampField(), equalTo(remoteDataStream.getTimeStampField())); assertThat(result.getGeneration(), equalTo(remoteDataStream.getGeneration())); @@ -51,7 +61,12 @@ public void testUpdateLocalDataStream_followOlderBackingIndex() { DataStream remoteDataStream = generateDataSteam("logs-foobar", 5, false); DataStream localDataStream = generateDataSteam("logs-foobar", 5, true, DataStream.getDefaultBackingIndexName("logs-foobar", 5)); Index backingIndexToFollow = remoteDataStream.getIndices().get(0); - DataStream result = TransportPutFollowAction.updateLocalDataStream(backingIndexToFollow, localDataStream, remoteDataStream); + DataStream result = TransportPutFollowAction.updateLocalDataStream( + backingIndexToFollow, + localDataStream, + remoteDataStream.getName(), + remoteDataStream + ); assertThat(result.getName(), equalTo(remoteDataStream.getName())); assertThat(result.getTimeStampField(), 
equalTo(remoteDataStream.getTimeStampField())); assertThat(result.getGeneration(), equalTo(remoteDataStream.getGeneration())); @@ -62,7 +77,12 @@ public void testUpdateLocalDataStream_followOlderBackingIndex() { // follow second last backing index: localDataStream = result; backingIndexToFollow = remoteDataStream.getIndices().get(remoteDataStream.getIndices().size() - 2); - result = TransportPutFollowAction.updateLocalDataStream(backingIndexToFollow, localDataStream, remoteDataStream); + result = TransportPutFollowAction.updateLocalDataStream( + backingIndexToFollow, + localDataStream, + remoteDataStream.getName(), + remoteDataStream + ); assertThat(result.getName(), equalTo(remoteDataStream.getName())); assertThat(result.getTimeStampField(), equalTo(remoteDataStream.getTimeStampField())); assertThat(result.getGeneration(), equalTo(remoteDataStream.getGeneration())); diff --git a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierShardAvailabilityHealthIndicatorIT.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierShardAvailabilityHealthIndicatorIT.java new file mode 100644 index 0000000000000..49a0b78316e37 --- /dev/null +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierShardAvailabilityHealthIndicatorIT.java @@ -0,0 +1,197 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.cluster.routing.allocation; + +import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplanation; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.node.DiscoveryNodeRole; +import org.elasticsearch.cluster.routing.RoutingNodesHelper; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.routing.allocation.DataTier; +import org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.health.Diagnosis; +import org.elasticsearch.health.GetHealthAction; +import org.elasticsearch.health.HealthIndicatorResult; +import org.elasticsearch.health.HealthStatus; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; + +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.stream.Collectors; + +import static org.elasticsearch.test.NodeRoles.onlyRole; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItem; + +/** + * Contains all integration tests for the {@link org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService} + * that require the data tiers allocation decider 
logic. + */ +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) +public class DataTierShardAvailabilityHealthIndicatorIT extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return List.of(LocalStateCompositeXPackPlugin.class); + } + + /** + * Verify that the health API returns an "increase tier capacity" diagnosis when an index is created but there aren't enough nodes in + * a tier to host the desired replicas on unique nodes. + */ + public void testIncreaseTierCapacityDiagnosisWhenCreated() throws Exception { + internalCluster().startMasterOnlyNodes(1); + internalCluster().startNodes(1, onlyRole(DiscoveryNodeRole.DATA_HOT_NODE_ROLE)); + ElasticsearchAssertions.assertAcked( + prepareCreate("test").setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(DataTier.TIER_PREFERENCE, DataTier.DATA_HOT) + ) + ); + ensureYellow("test"); + GetHealthAction.Response healthResponse = client().execute( + GetHealthAction.INSTANCE, + new GetHealthAction.Request(ShardsAvailabilityHealthIndicatorService.NAME, true) + ).get(); + HealthIndicatorResult indicatorResult = healthResponse.findIndicator(ShardsAvailabilityHealthIndicatorService.NAME); + assertThat(indicatorResult.status(), equalTo(HealthStatus.YELLOW)); + assertThat( + indicatorResult.diagnosisList(), + hasItem( + new Diagnosis( + ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_TIER_CAPACITY_LOOKUP.get(DataTier.DATA_HOT), + List.of("test") + ) + ) + ); + } + + /** + * Verify that the health API returns an "increase tier capacity" diagnosis when enough nodes in a tier leave such that the tier cannot + * host all of an index's replicas on unique nodes. + */ + public void testIncreaseTierCapacityDiagnosisWhenTierShrinksUnexpectedly() throws Exception { + internalCluster().startMasterOnlyNodes(1); + internalCluster().startNodes(2, onlyRole(DiscoveryNodeRole.DATA_HOT_NODE_ROLE)); + ElasticsearchAssertions.assertAcked( + prepareCreate("test").setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(DataTier.TIER_PREFERENCE, DataTier.DATA_HOT) + .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), 0) + ) + ); + ensureGreen("test"); + indexRandomData("test"); + internalCluster().stopNode(findNodeWithReplicaShard("test", 0)); + ensureYellow("test"); + GetHealthAction.Response healthResponse = client().execute( + GetHealthAction.INSTANCE, + new GetHealthAction.Request(ShardsAvailabilityHealthIndicatorService.NAME, true) + ).get(); + ClusterAllocationExplanation explain = client().admin() + .cluster() + .prepareAllocationExplain() + .setIndex("test") + .setShard(0) + .setPrimary(false) + .get() + .getExplanation(); + logger.info(XContentHelper.toXContent(explain, XContentType.JSON, true).utf8ToString()); + HealthIndicatorResult indicatorResult = healthResponse.findIndicator(ShardsAvailabilityHealthIndicatorService.NAME); + assertThat(indicatorResult.status(), equalTo(HealthStatus.YELLOW)); + assertThat( + indicatorResult.diagnosisList(), + hasItem( + new Diagnosis( + ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_TIER_CAPACITY_LOOKUP.get(DataTier.DATA_HOT), + List.of("test") + ) + ) + ); + } + + /** + * Verify that the health API returns a "YELLOW" status when a node disappears and a shard is unassigned because it is delayed. 
+ */ + public void testRemovingNodeReturnsYellowForDelayedIndex() throws Exception { + internalCluster().startMasterOnlyNodes(1); + internalCluster().startNodes(3, onlyRole(DiscoveryNodeRole.DATA_HOT_NODE_ROLE)); + ElasticsearchAssertions.assertAcked( + prepareCreate("test").setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(DataTier.TIER_PREFERENCE, DataTier.DATA_HOT) + .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMinutes(30)) + ) + ); + ensureGreen("test"); + indexRandomData("test"); + internalCluster().stopNode(findNodeWithPrimaryShard("test", 0)); + ensureYellow("test"); + GetHealthAction.Response healthResponse = client().execute( + GetHealthAction.INSTANCE, + new GetHealthAction.Request(ShardsAvailabilityHealthIndicatorService.NAME, true) + ).get(); + HealthIndicatorResult indicatorResult = healthResponse.findIndicator(ShardsAvailabilityHealthIndicatorService.NAME); + assertThat(indicatorResult.status(), equalTo(HealthStatus.YELLOW)); + assertThat(indicatorResult.diagnosisList().size(), equalTo(1)); + assertThat( + indicatorResult.diagnosisList(), + hasItem(new Diagnosis(ShardsAvailabilityHealthIndicatorService.DIAGNOSIS_WAIT_FOR_OR_FIX_DELAYED_SHARDS, List.of("test"))) + ); + } + + private void indexRandomData(String indexName) throws Exception { + int numDocs = scaledRandomIntBetween(100, 1000); + IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < builders.length; i++) { + builders[i] = client().prepareIndex(indexName).setSource("field", "value"); + } + // we want to test both full divergent copies of the shard in terms of segments, and + // a case where they are the same (using sync flush), index Random does all this goodness + // already + indexRandom(true, builders); + } + + private String findNodeWithPrimaryShard(String indexName, int shard) { + return findNodeWithShard(indexName, shard, true); + } + + private String findNodeWithReplicaShard(String indexName, int shard) { + return findNodeWithShard(indexName, shard, false); + } + + private String findNodeWithShard(final String indexName, final int shard, final boolean primary) { + ClusterState state = client().admin().cluster().prepareState().get().getState(); + List startedShards = RoutingNodesHelper.shardsWithState(state.getRoutingNodes(), ShardRoutingState.STARTED); + startedShards = startedShards.stream() + .filter(shardRouting -> shardRouting.getIndexName().equals(indexName)) + .filter(shardRouting -> shard == shardRouting.getId()) + .filter(shardRouting -> primary == shardRouting.primary()) + .collect(Collectors.toList()); + Collections.shuffle(startedShards, random()); + return state.nodes().get(startedShards.get(0).currentNodeId()).getName(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java index cf4846b761041..0d77d5c9a648c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java @@ -15,9 +15,11 @@ import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.common.Strings; import 
org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Nullable; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; @@ -43,6 +45,7 @@ public static class Request extends AcknowledgedRequest implements Indi private static final ParseField REMOTE_CLUSTER_FIELD = new ParseField("remote_cluster"); private static final ParseField LEADER_INDEX_FIELD = new ParseField("leader_index"); private static final ParseField SETTINGS_FIELD = new ParseField("settings"); + private static final ParseField DATA_STREAM_NAME = new ParseField("data_stream_name"); // Note that Request should be the Value class here for this parser with a 'parameters' field that maps to // PutFollowParameters class. But since two minor version are already released with duplicate follow parameters @@ -52,6 +55,7 @@ public static class Request extends AcknowledgedRequest implements Indi static { PARSER.declareString((putFollowParameters, value) -> putFollowParameters.remoteCluster = value, REMOTE_CLUSTER_FIELD); PARSER.declareString((putFollowParameters, value) -> putFollowParameters.leaderIndex = value, LEADER_INDEX_FIELD); + PARSER.declareString((putFollowParameters, value) -> putFollowParameters.dataStreamName = value, DATA_STREAM_NAME); PARSER.declareObject( (putFollowParameters, value) -> putFollowParameters.settings = value, (p, c) -> Settings.fromXContent(p), @@ -69,6 +73,7 @@ public static Request fromXContent(final XContentParser parser, final String fol request.setFollowerIndex(followerIndex); request.setRemoteCluster(parameters.remoteCluster); request.setLeaderIndex(parameters.leaderIndex); + request.setDataStreamName(parameters.dataStreamName); request.setSettings(parameters.settings); request.setParameters(parameters); return request; @@ -76,8 +81,10 @@ public static Request fromXContent(final XContentParser parser, final String fol private String remoteCluster; private String leaderIndex; - private Settings settings = Settings.EMPTY; private String followerIndex; + @Nullable + private String dataStreamName; + private Settings settings = Settings.EMPTY; private FollowParameters parameters = new FollowParameters(); private ActiveShardCount waitForActiveShards = ActiveShardCount.NONE; @@ -123,6 +130,15 @@ public void setParameters(FollowParameters parameters) { this.parameters = parameters; } + @Nullable + public String getDataStreamName() { + return dataStreamName; + } + + public void setDataStreamName(String dataStreamName) { + this.dataStreamName = dataStreamName; + } + public ActiveShardCount waitForActiveShards() { return waitForActiveShards; } @@ -156,6 +172,9 @@ public ActionRequestValidationException validate() { if (followerIndex == null) { e = addValidationError("follower_index is missing", e); } + if (dataStreamName != null && Strings.hasText(dataStreamName) == false) { + e = addValidationError("data stream name must contain text if present", e); + } return e; } @@ -179,6 +198,9 @@ public Request(StreamInput in) throws IOException { } this.parameters = new FollowParameters(in); waitForActiveShards(ActiveShardCount.readFrom(in)); + if (in.getVersion().onOrAfter(Version.V_8_4_0)) { + this.dataStreamName = in.readOptionalString(); + } } @Override @@ -192,6 +214,9 @@ public void writeTo(StreamOutput out) throws IOException { } parameters.writeTo(out); waitForActiveShards.writeTo(out); + if 
(out.getVersion().onOrAfter(Version.V_8_4_0)) { + out.writeOptionalString(this.dataStreamName); + } } @Override @@ -200,6 +225,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws { builder.field(REMOTE_CLUSTER_FIELD.getPreferredName(), remoteCluster); builder.field(LEADER_INDEX_FIELD.getPreferredName(), leaderIndex); + if (dataStreamName != null) { + builder.field(DATA_STREAM_NAME.getPreferredName(), dataStreamName); + } if (settings.isEmpty() == false) { builder.startObject(SETTINGS_FIELD.getPreferredName()); { @@ -222,12 +250,14 @@ public boolean equals(Object o) { && Objects.equals(leaderIndex, request.leaderIndex) && Objects.equals(followerIndex, request.followerIndex) && Objects.equals(parameters, request.parameters) - && Objects.equals(waitForActiveShards, request.waitForActiveShards); + && Objects.equals(waitForActiveShards, request.waitForActiveShards) + && Objects.equals(dataStreamName, request.dataStreamName) + && Objects.equals(settings, request.settings); } @Override public int hashCode() { - return Objects.hash(remoteCluster, leaderIndex, followerIndex, parameters, waitForActiveShards); + return Objects.hash(remoteCluster, leaderIndex, followerIndex, parameters, settings, waitForActiveShards, dataStreamName); } // This class only exists for reuse of the FollowParameters class, see comment above the parser field. @@ -235,6 +265,7 @@ private static class PutFollowParameters extends FollowParameters { private String remoteCluster; private String leaderIndex; + private String dataStreamName; private Settings settings = Settings.EMPTY; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SwapAliasesAndDeleteSourceIndexStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SwapAliasesAndDeleteSourceIndexStep.java index b5bbd65f0d2c7..9289ac79efbf5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SwapAliasesAndDeleteSourceIndexStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SwapAliasesAndDeleteSourceIndexStep.java @@ -132,6 +132,7 @@ static void deleteSourceIndexAndTransferAliases( .searchRouting(aliasMetaDataToAdd.searchRouting()) .filter(aliasMetaDataToAdd.filter() == null ? null : aliasMetaDataToAdd.filter().string()) .writeIndex(null) + .isHidden(aliasMetaDataToAdd.isHidden()) ); }); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java index 2aacee4f3766f..aaf111d5a568f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java @@ -71,6 +71,10 @@ public static class Request extends MasterNodeRequest implements ToXCon AllocationStatus.State.FULLY_ALLOCATED }; private static final int MAX_THREADS_PER_ALLOCATION = 32; + /** + * If the queue is created then we can OOM when we create the queue. 
+ */ + private static final int MAX_QUEUE_CAPACITY = 1_000_000; public static final ParseField MODEL_ID = new ParseField("model_id"); public static final ParseField TIMEOUT = new ParseField("timeout"); @@ -248,6 +252,9 @@ public ActionRequestValidationException validate() { if (queueCapacity < 1) { validationException.addValidationError("[" + QUEUE_CAPACITY + "] must be a positive integer"); } + if (queueCapacity > MAX_QUEUE_CAPACITY) { + validationException.addValidationError("[" + QUEUE_CAPACITY + "] must be less than " + MAX_QUEUE_CAPACITY); + } return validationException.validationErrors().isEmpty() ? null : validationException; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java index e4fc15e669f80..91258c00f7c71 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java @@ -9,6 +9,7 @@ import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.Version; import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.io.stream.StreamInput; @@ -48,6 +49,7 @@ public class TrainedModelAssignment implements SimpleDiffable PARSER = new ConstructingObjectParser<>( @@ -59,7 +61,8 @@ public class TrainedModelAssignment implements SimpleDiffable nodeRoutingTable, AssignmentState assignmentState, String reason, - Instant startTime + Instant startTime, + Integer maxAssignedAllocations ) { this.taskParams = ExceptionsHelper.requireNonNull(taskParams, TASK_PARAMETERS); this.nodeRoutingTable = ExceptionsHelper.requireNonNull(nodeRoutingTable, ROUTING_TABLE); this.assignmentState = ExceptionsHelper.requireNonNull(assignmentState, ASSIGNMENT_STATE); this.reason = reason; this.startTime = ExceptionsHelper.requireNonNull(startTime, START_TIME); + this.maxAssignedAllocations = maxAssignedAllocations == null + ? 
totalCurrentAllocations() + : Math.max(maxAssignedAllocations, totalCurrentAllocations()); } public TrainedModelAssignment(StreamInput in) throws IOException { @@ -125,6 +142,11 @@ public TrainedModelAssignment(StreamInput in) throws IOException { this.assignmentState = in.readEnum(AssignmentState.class); this.reason = in.readOptionalString(); this.startTime = in.readInstant(); + if (in.getVersion().onOrAfter(Version.V_8_4_0)) { + this.maxAssignedAllocations = in.readVInt(); + } else { + this.maxAssignedAllocations = totalCurrentAllocations(); + } } public boolean isRoutedToNode(String nodeId) { @@ -189,6 +211,10 @@ public Instant getStartTime() { return startTime; } + public int getMaxAssignedAllocations() { + return maxAssignedAllocations; + } + public boolean isSatisfied(Set assignableNodeIds) { int allocations = nodeRoutingTable.entrySet() .stream() @@ -203,6 +229,10 @@ public boolean hasOutdatedRoutingEntries() { return nodeRoutingTable.values().stream().anyMatch(RoutingInfo::isOutdated); } + public int totalCurrentAllocations() { + return nodeRoutingTable.values().stream().mapToInt(RoutingInfo::getCurrentAllocations).sum(); + } + @Override public boolean equals(Object o) { if (this == o) return true; @@ -212,12 +242,13 @@ public boolean equals(Object o) { && Objects.equals(taskParams, that.taskParams) && Objects.equals(reason, that.reason) && Objects.equals(assignmentState, that.assignmentState) - && Objects.equals(startTime, that.startTime); + && Objects.equals(startTime, that.startTime) + && maxAssignedAllocations == that.maxAssignedAllocations; } @Override public int hashCode() { - return Objects.hash(nodeRoutingTable, taskParams, assignmentState, reason, startTime); + return Objects.hash(nodeRoutingTable, taskParams, assignmentState, reason, startTime, maxAssignedAllocations); } @Override @@ -230,6 +261,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(REASON.getPreferredName(), reason); } builder.timeField(START_TIME.getPreferredName(), startTime); + builder.field(MAX_ASSIGNED_ALLOCATIONS.getPreferredName(), maxAssignedAllocations); builder.endObject(); return builder; } @@ -241,6 +273,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeEnum(assignmentState); out.writeOptionalString(reason); out.writeInstant(startTime); + if (out.getVersion().onOrAfter(Version.V_8_4_0)) { + out.writeVInt(maxAssignedAllocations); + } } public Optional calculateAllocationStatus() { @@ -261,6 +296,7 @@ public static class Builder { private AssignmentState assignmentState; private String reason; private Instant startTime; + private int maxAssignedAllocations; public static Builder fromAssignment(TrainedModelAssignment assignment) { return new Builder( @@ -268,7 +304,8 @@ public static Builder fromAssignment(TrainedModelAssignment assignment) { assignment.nodeRoutingTable, assignment.assignmentState, assignment.reason, - assignment.startTime + assignment.startTime, + assignment.maxAssignedAllocations ); } @@ -281,17 +318,19 @@ private Builder( Map nodeRoutingTable, AssignmentState assignmentState, String reason, - Instant startTime + Instant startTime, + int maxAssignedAllocations ) { this.taskParams = taskParams; this.nodeRoutingTable = new LinkedHashMap<>(nodeRoutingTable); this.assignmentState = assignmentState; this.reason = reason; this.startTime = startTime; + this.maxAssignedAllocations = maxAssignedAllocations; } private Builder(StartTrainedModelDeploymentAction.TaskParams taskParams) { - this(taskParams, new 
LinkedHashMap<>(), AssignmentState.STARTING, null, Instant.now()); + this(taskParams, new LinkedHashMap<>(), AssignmentState.STARTING, null, Instant.now(), 0); } public Builder setStartTime(Instant startTime) { @@ -299,6 +338,11 @@ public Builder setStartTime(Instant startTime) { return this; } + public Builder setMaxAssignedAllocations(int maxAssignedAllocations) { + this.maxAssignedAllocations = maxAssignedAllocations; + return this; + } + public Builder addRoutingEntry(String nodeId, RoutingInfo routingInfo) { if (nodeRoutingTable.containsKey(nodeId)) { throw new ResourceAlreadyExistsException( @@ -383,7 +427,7 @@ public Builder clearReason() { } public TrainedModelAssignment build() { - return new TrainedModelAssignment(taskParams, nodeRoutingTable, assignmentState, reason, startTime); + return new TrainedModelAssignment(taskParams, nodeRoutingTable, assignmentState, reason, startTime, maxAssignedAllocations); } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfig.java index 3ee09ffc1e837..710a2855167cf 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfig.java @@ -248,8 +248,8 @@ public String getHypothesisTemplate() { return hypothesisTemplate; } - public List getLabels() { - return Optional.ofNullable(labels).orElse(List.of()); + public Optional> getLabels() { + return Optional.ofNullable(labels); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfigUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfigUpdate.java index 3cf9f8c8f8354..acfd726ca27a5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfigUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfigUpdate.java @@ -147,13 +147,13 @@ public InferenceConfig apply(InferenceConfig originalConfig) { tokenizationUpdate == null ? 
zeroShotConfig.getTokenization() : tokenizationUpdate.apply(zeroShotConfig.getTokenization()), zeroShotConfig.getHypothesisTemplate(), Optional.ofNullable(isMultiLabel).orElse(zeroShotConfig.isMultiLabel()), - Optional.ofNullable(labels).orElse(zeroShotConfig.getLabels()), + Optional.ofNullable(labels).orElse(zeroShotConfig.getLabels().orElse(null)), Optional.ofNullable(resultsField).orElse(zeroShotConfig.getResultsField()) ); } boolean isNoop(ZeroShotClassificationConfig originalConfig) { - return (labels == null || labels.equals(originalConfig.getLabels())) + return (labels == null || labels.equals(originalConfig.getLabels().orElse(null))) && (isMultiLabel == null || isMultiLabel.equals(originalConfig.isMultiLabel())) && (resultsField == null || resultsField.equals(originalConfig.getResultsField())) && super.isNoop(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java index a7dd7f480705f..c52ccbe6e1b20 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java @@ -69,6 +69,14 @@ public class AnalysisConfig implements ToXContentObject, Writeable { public static final String ML_CATEGORY_FIELD = "mlcategory"; public static final Set AUTO_CREATED_FIELDS = new HashSet<>(Collections.singletonList(ML_CATEGORY_FIELD)); + // Since the C++ backend truncates the categorization field at length 1000 (see model::CCategoryExamplesCollector::MAX_EXAMPLE_LENGTH), + // adding an ellipsis on truncation, it makes no sense to send potentially very long strings to it. For the backend logic still to work + // we need to send more than that, hence we truncate at length 1001. + // + // Also, because we do the tokenization on the Java side now the tokens will still be sent correctly (separately) to the C++ backend + // even if they extend beyond the length of a truncated example. 
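Note: the comment above motivates the new length constant that follows in this hunk. A minimal standalone sketch of that truncation rule, using hypothetical class and method names (the real constant lives in AnalysisConfig, immediately below): keep one character more than the backend's 1000-character example limit so the C++ side still performs, and marks, the truncation itself.

// Illustrative sketch only; not the actual Elasticsearch helper.
final class CategorizationFieldTruncation {
    // backend keeps 1000 chars and appends an ellipsis; sending 1001 lets it do that
    static final int MAX_CATEGORIZATION_FIELD_LENGTH = 1001;

    static String truncate(String fieldValue) {
        return fieldValue.length() <= MAX_CATEGORIZATION_FIELD_LENGTH
            ? fieldValue
            : fieldValue.substring(0, MAX_CATEGORIZATION_FIELD_LENGTH);
    }
}
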
+ public static final int MAX_CATEGORIZATION_FIELD_LENGTH = 1001; + // These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly public static final ConstructingObjectParser LENIENT_PARSER = createParser(true); public static final ConstructingObjectParser STRICT_PARSER = createParser(false); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SwapAliasesAndDeleteSourceIndexStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SwapAliasesAndDeleteSourceIndexStepTests.java index 40c1f787f4a2b..bbc8ab8c0a543 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SwapAliasesAndDeleteSourceIndexStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SwapAliasesAndDeleteSourceIndexStepTests.java @@ -72,6 +72,7 @@ public void testPerformAction() { .settings(settings(Version.CURRENT)) .numberOfShards(randomIntBetween(1, 5)) .numberOfReplicas(randomIntBetween(0, 5)); + Boolean isHidden = randomFrom(Boolean.TRUE, Boolean.FALSE, null); AliasMetadata.Builder aliasBuilder = AliasMetadata.builder(randomAlphaOfLengthBetween(3, 10)); if (randomBoolean()) { aliasBuilder.routing(randomAlphaOfLengthBetween(1, 10)); @@ -83,6 +84,7 @@ public void testPerformAction() { aliasBuilder.indexRouting(randomAlphaOfLengthBetween(1, 10)); } aliasBuilder.writeIndex(randomBoolean()); + aliasBuilder.isHidden(isHidden); AliasMetadata aliasMetadata = aliasBuilder.build(); IndexMetadata sourceIndexMetadata = sourceIndexMetadataBuilder.putAlias(aliasMetadata).build(); @@ -98,6 +100,7 @@ public void testPerformAction() { .searchRouting(aliasMetadata.searchRouting()) .indexRouting(aliasMetadata.indexRouting()) .writeIndex(null) + .isHidden(isHidden) ); try (NoOpClient client = getIndicesAliasAssertingClient(expectedAliasActions)) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentRequestTests.java index 6cdd355997d4e..ce0c34f0e8aa1 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentRequestTests.java @@ -53,13 +53,13 @@ public static Request createRandom() { request.setWaitForState(randomFrom(AllocationStatus.State.values())); } if (randomBoolean()) { - request.setThreadsPerAllocation(randomIntBetween(1, 8)); + request.setThreadsPerAllocation(randomFrom(1, 2, 4, 8, 16, 32)); } if (randomBoolean()) { request.setNumberOfAllocations(randomIntBetween(1, 8)); } if (randomBoolean()) { - request.setQueueCapacity(randomIntBetween(1, 10000)); + request.setQueueCapacity(randomIntBetween(1, 1000000)); } return request; } @@ -150,6 +150,25 @@ public void testValidate_GivenQueueCapacityIsNegative() { assertThat(e.getMessage(), containsString("[queue_capacity] must be a positive integer")); } + public void testValidate_GivenQueueCapacityIsAtLimit() { + Request request = createRandom(); + request.setQueueCapacity(1_000_000); + + ActionRequestValidationException e = request.validate(); + + assertThat(e, is(nullValue())); + } + + public void testValidate_GivenQueueCapacityIsOverLimit() { + Request request = createRandom(); + request.setQueueCapacity(1_000_001); + + ActionRequestValidationException e = 
request.validate(); + + assertThat(e, is(not(nullValue()))); + assertThat(e.getMessage(), containsString("[queue_capacity] must be less than 1000000")); + } + public void testDefaults() { Request request = new Request(randomAlphaOfLength(10)); assertThat(request.getTimeout(), equalTo(TimeValue.timeValueSeconds(20))); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/InferenceConfigItemTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/InferenceConfigItemTestCase.java index 79157bcb5ab27..97eb38790d071 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/InferenceConfigItemTestCase.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/InferenceConfigItemTestCase.java @@ -15,6 +15,22 @@ import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xpack.core.ml.AbstractBWCSerializationTestCase; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.FillMaskConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.FillMaskConfigTests; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.NerConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.NerConfigTests; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.NlpConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.PassThroughConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.PassThroughConfigTests; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.QuestionAnsweringConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.QuestionAnsweringConfigTests; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextClassificationConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextClassificationConfigTests; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextEmbeddingConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextEmbeddingConfigTests; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ZeroShotClassificationConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ZeroShotClassificationConfigTests; import java.util.ArrayList; import java.util.Collections; @@ -25,6 +41,27 @@ public abstract class InferenceConfigItemTestCase extends AbstractBWCSerializationTestCase< T> { + + static InferenceConfig mutateForVersion(NlpConfig inferenceConfig, Version version) { + if (inferenceConfig instanceof TextClassificationConfig textClassificationConfig) { + return TextClassificationConfigTests.mutateForVersion(textClassificationConfig, version); + } else if (inferenceConfig instanceof FillMaskConfig fillMaskConfig) { + return FillMaskConfigTests.mutateForVersion(fillMaskConfig, version); + } else if (inferenceConfig instanceof QuestionAnsweringConfig questionAnsweringConfig) { + return QuestionAnsweringConfigTests.mutateForVersion(questionAnsweringConfig, version); + } else if (inferenceConfig instanceof NerConfig nerConfig) { + return NerConfigTests.mutateForVersion(nerConfig, version); + } else if (inferenceConfig instanceof PassThroughConfig passThroughConfig) { + return PassThroughConfigTests.mutateForVersion(passThroughConfig, version); + } else if (inferenceConfig instanceof TextEmbeddingConfig textEmbeddingConfig) { + return 
TextEmbeddingConfigTests.mutateForVersion(textEmbeddingConfig, version); + } else if (inferenceConfig instanceof ZeroShotClassificationConfig zeroShotClassificationConfig) { + return ZeroShotClassificationConfigTests.mutateForVersion(zeroShotClassificationConfig, version); + } else { + throw new IllegalArgumentException("unknown inference config [" + inferenceConfig.getName() + "]"); + } + } + @Override protected NamedXContentRegistry xContentRegistry() { List namedXContent = new ArrayList<>(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfigTests.java index 8f4bc321fa1fb..9b014d338e6bd 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfigTests.java @@ -28,9 +28,9 @@ import org.elasticsearch.xpack.core.ml.inference.trainedmodel.IndexLocationTests; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.NerConfigTests; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.NlpConfig; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.PassThroughConfigTests; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.RegressionConfigTests; -import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextClassificationConfig; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextClassificationConfigTests; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextEmbeddingConfigTests; import org.elasticsearch.xpack.core.ml.job.messages.Messages; @@ -391,8 +391,8 @@ protected TrainedModelConfig mutateInstanceForVersion(TrainedModelConfig instanc builder.setModelType(null); builder.setLocation(null); } - if (instance.getInferenceConfig()instanceof TextClassificationConfig textClassificationConfig) { - builder.setInferenceConfig(TextClassificationConfigTests.mutateInstance(textClassificationConfig, version)); + if (instance.getInferenceConfig()instanceof NlpConfig nlpConfig) { + builder.setInferenceConfig(InferenceConfigItemTestCase.mutateForVersion(nlpConfig, version)); } return builder.build(); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentTests.java index 323fb60314dc6..812614f640fdb 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentTests.java @@ -257,6 +257,21 @@ public void testIsSatisfied_GivenNotEnoughAllocations() { assertThat(assignment.isSatisfied(Sets.newHashSet("node-1", "node-2", "node-3")), is(false)); } + public void testMaxAssignedAllocations() { + TrainedModelAssignment assignment = TrainedModelAssignment.Builder.empty(randomTaskParams(10)) + .addRoutingEntry("node-1", new RoutingInfo(1, 2, RoutingState.STARTED, "")) + .addRoutingEntry("node-2", new RoutingInfo(2, 1, RoutingState.STARTED, "")) + .addRoutingEntry("node-3", new RoutingInfo(3, 3, RoutingState.STARTING, "")) + .build(); + assertThat(assignment.getMaxAssignedAllocations(), equalTo(6)); 
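Note: the assertion above exercises the new maxAssignedAllocations field as a high-water mark: it records the largest number of allocations the assignment has ever held and never decreases when routing entries are removed. A standalone sketch of that behaviour, with illustrative names rather than the production TrainedModelAssignment class:

// Sketch only: mirrors maxAssignedAllocations = Math.max(previousMax, totalCurrentAllocations()).
final class AllocationHighWaterMark {
    private int maxAssigned;

    void onAllocationsChanged(int totalCurrentAllocations) {
        maxAssigned = Math.max(maxAssigned, totalCurrentAllocations);
    }

    int maxAssignedAllocations() {
        return maxAssigned;
    }
}

The remainder of the test below removes a routing entry and checks exactly this: current allocations drop to 5 while the maximum stays at 6.
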
+ + TrainedModelAssignment assignmentAfterRemovingNode = TrainedModelAssignment.Builder.fromAssignment(assignment) + .removeRoutingEntry("node-1") + .build(); + assertThat(assignmentAfterRemovingNode.getMaxAssignedAllocations(), equalTo(6)); + assertThat(assignmentAfterRemovingNode.totalCurrentAllocations(), equalTo(5)); + } + private void assertValueWithinPercentageOfExpectedRatio(long value, long totalCount, double ratio, double tolerance) { double expected = totalCount * ratio; double lowerBound = (1.0 - tolerance) * expected; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/BertTokenizationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/BertTokenizationTests.java index 9a84c254c5452..952e6b4372534 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/BertTokenizationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/BertTokenizationTests.java @@ -19,6 +19,19 @@ public class BertTokenizationTests extends AbstractBWCSerializationTestCase getRandomFieldsExcludeFilter() { return field -> field.isEmpty() == false; @@ -44,7 +53,7 @@ protected FillMaskConfig createTestInstance() { @Override protected FillMaskConfig mutateInstanceForVersion(FillMaskConfig instance, Version version) { - return instance; + return mutateForVersion(instance, version); } public static FillMaskConfig createRandom() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/InferenceConfigTestScaffolding.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/InferenceConfigTestScaffolding.java index 43020fe23e114..228cdb40e3a89 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/InferenceConfigTestScaffolding.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/InferenceConfigTestScaffolding.java @@ -7,8 +7,22 @@ package org.elasticsearch.xpack.core.ml.inference.trainedmodel; +import org.elasticsearch.Version; + public final class InferenceConfigTestScaffolding { + static Tokenization mutateTokenizationForVersion(Tokenization tokenization, Version version) { + if (tokenization instanceof BertTokenization bertTokenization) { + return BertTokenizationTests.mutateForVersion(bertTokenization, version); + } else if (tokenization instanceof MPNetTokenization mpNetTokenization) { + return MPNetTokenizationTests.mutateForVersion(mpNetTokenization, version); + } else if (tokenization instanceof RobertaTokenization robertaTokenization) { + return RobertaTokenizationTests.mutateForVersion(robertaTokenization, version); + } else { + throw new IllegalArgumentException("unknown tokenization [" + tokenization.getName() + "]"); + } + } + static Tokenization cloneWithNewTruncation(Tokenization tokenization, Tokenization.Truncate truncate) { if (tokenization instanceof MPNetTokenization) { return new MPNetTokenization( diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/MPNetTokenizationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/MPNetTokenizationTests.java index 4c01935a7ef43..dead82c736445 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/MPNetTokenizationTests.java +++ 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/MPNetTokenizationTests.java @@ -19,6 +19,19 @@ public class MPNetTokenizationTests extends AbstractBWCSerializationTestCase { + public static NerConfig mutateForVersion(NerConfig instance, Version version) { + return new NerConfig( + instance.getVocabularyConfig(), + InferenceConfigTestScaffolding.mutateTokenizationForVersion(instance.getTokenization(), version), + instance.getClassificationLabels(), + instance.getResultsField() + ); + } + @Override protected boolean supportsUnknownFields() { return true; @@ -48,7 +57,7 @@ protected NerConfig createTestInstance() { @Override protected NerConfig mutateInstanceForVersion(NerConfig instance, Version version) { - return instance; + return mutateForVersion(instance, version); } public static NerConfig createRandom() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/PassThroughConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/PassThroughConfigTests.java index 3701a07b73d5b..28e107101d288 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/PassThroughConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/PassThroughConfigTests.java @@ -17,6 +17,14 @@ public class PassThroughConfigTests extends InferenceConfigItemTestCase { + public static PassThroughConfig mutateForVersion(PassThroughConfig instance, Version version) { + return new PassThroughConfig( + instance.getVocabularyConfig(), + InferenceConfigTestScaffolding.mutateTokenizationForVersion(instance.getTokenization(), version), + instance.getResultsField() + ); + } + @Override protected boolean supportsUnknownFields() { return true; @@ -44,7 +52,7 @@ protected PassThroughConfig createTestInstance() { @Override protected PassThroughConfig mutateInstanceForVersion(PassThroughConfig instance, Version version) { - return instance; + return mutateForVersion(instance, version); } public static PassThroughConfig createRandom() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/QuestionAnsweringConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/QuestionAnsweringConfigTests.java index 2ad335d3cf4b0..0f8f2f0783660 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/QuestionAnsweringConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/QuestionAnsweringConfigTests.java @@ -17,6 +17,16 @@ public class QuestionAnsweringConfigTests extends InferenceConfigItemTestCase { + public static QuestionAnsweringConfig mutateForVersion(QuestionAnsweringConfig instance, Version version) { + return new QuestionAnsweringConfig( + instance.getNumTopClasses(), + instance.getMaxAnswerLength(), + instance.getVocabularyConfig(), + InferenceConfigTestScaffolding.mutateTokenizationForVersion(instance.getTokenization(), version), + instance.getResultsField() + ); + } + @Override protected boolean supportsUnknownFields() { return true; @@ -44,7 +54,7 @@ protected QuestionAnsweringConfig createTestInstance() { @Override protected QuestionAnsweringConfig mutateInstanceForVersion(QuestionAnsweringConfig instance, Version version) { - return instance; + return mutateForVersion(instance, version); } public static 
QuestionAnsweringConfig createRandom() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/RobertaTokenizationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/RobertaTokenizationTests.java index 0803fec7304bc..920933be7450e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/RobertaTokenizationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/RobertaTokenizationTests.java @@ -19,6 +19,19 @@ public class RobertaTokenizationTests extends AbstractBWCSerializationTestCase { - public static TextClassificationConfig mutateInstance(TextClassificationConfig instance, Version version) { - if (version.before(Version.V_8_2_0)) { - final Tokenization tokenization; - if (instance.getTokenization() instanceof BertTokenization) { - tokenization = new BertTokenization( - instance.getTokenization().doLowerCase, - instance.getTokenization().withSpecialTokens, - instance.getTokenization().maxSequenceLength, - instance.getTokenization().truncate, - null - ); - } else if (instance.getTokenization() instanceof MPNetTokenization) { - tokenization = new MPNetTokenization( - instance.getTokenization().doLowerCase, - instance.getTokenization().withSpecialTokens, - instance.getTokenization().maxSequenceLength, - instance.getTokenization().truncate, - null - ); - } else { - throw new UnsupportedOperationException("unknown tokenization type: " + instance.getTokenization().getName()); - } - return new TextClassificationConfig( - instance.getVocabularyConfig(), - tokenization, - instance.getClassificationLabels(), - instance.getNumTopClasses(), - instance.getResultsField() - ); - } - return instance; + public static TextClassificationConfig mutateForVersion(TextClassificationConfig instance, Version version) { + return new TextClassificationConfig( + instance.getVocabularyConfig(), + InferenceConfigTestScaffolding.mutateTokenizationForVersion(instance.getTokenization(), version), + instance.getClassificationLabels(), + instance.getNumTopClasses(), + instance.getResultsField() + ); } @Override @@ -81,7 +58,7 @@ protected TextClassificationConfig createTestInstance() { @Override protected TextClassificationConfig mutateInstanceForVersion(TextClassificationConfig instance, Version version) { - return mutateInstance(instance, version); + return mutateForVersion(instance, version); } public void testInvalidClassificationLabels() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextEmbeddingConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextEmbeddingConfigTests.java index 373f3d3102e15..d60a8b28107da 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextEmbeddingConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextEmbeddingConfigTests.java @@ -17,6 +17,14 @@ public class TextEmbeddingConfigTests extends InferenceConfigItemTestCase { + public static TextEmbeddingConfig mutateForVersion(TextEmbeddingConfig instance, Version version) { + return new TextEmbeddingConfig( + instance.getVocabularyConfig(), + InferenceConfigTestScaffolding.mutateTokenizationForVersion(instance.getTokenization(), version), + instance.getResultsField() + ); + } + @Override protected boolean supportsUnknownFields() { return 
true; @@ -44,7 +52,7 @@ protected TextEmbeddingConfig createTestInstance() { @Override protected TextEmbeddingConfig mutateInstanceForVersion(TextEmbeddingConfig instance, Version version) { - return instance; + return mutateForVersion(instance, version); } public static TextEmbeddingConfig createRandom() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfigTests.java index 63b271c04dffb..48e4b25ea7316 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfigTests.java @@ -18,6 +18,18 @@ public class ZeroShotClassificationConfigTests extends InferenceConfigItemTestCase { + public static ZeroShotClassificationConfig mutateForVersion(ZeroShotClassificationConfig instance, Version version) { + return new ZeroShotClassificationConfig( + instance.getClassificationLabels(), + instance.getVocabularyConfig(), + InferenceConfigTestScaffolding.mutateTokenizationForVersion(instance.getTokenization(), version), + instance.getHypothesisTemplate(), + instance.isMultiLabel(), + instance.getLabels().orElse(null), + instance.getResultsField() + ); + } + @Override protected boolean supportsUnknownFields() { return true; @@ -45,7 +57,7 @@ protected ZeroShotClassificationConfig createTestInstance() { @Override protected ZeroShotClassificationConfig mutateInstanceForVersion(ZeroShotClassificationConfig instance, Version version) { - return instance; + return mutateForVersion(instance, version); } public static ZeroShotClassificationConfig createRandom() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfigUpdateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfigUpdateTests.java index 7aa80885ed7f4..2d424edac4c94 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfigUpdateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfigUpdateTests.java @@ -125,7 +125,7 @@ public void testApply() { originalConfig.getTokenization(), originalConfig.getHypothesisTemplate(), true, - originalConfig.getLabels(), + originalConfig.getLabels().orElse(null), originalConfig.getResultsField() ), equalTo(new ZeroShotClassificationConfigUpdate.Builder().setMultiLabel(true).build().apply(originalConfig)) @@ -137,7 +137,7 @@ public void testApply() { originalConfig.getTokenization(), originalConfig.getHypothesisTemplate(), originalConfig.isMultiLabel(), - originalConfig.getLabels(), + originalConfig.getLabels().orElse(null), "updated-field" ), equalTo(new ZeroShotClassificationConfigUpdate.Builder().setResultsField("updated-field").build().apply(originalConfig)) @@ -152,7 +152,7 @@ public void testApply() { tokenization, originalConfig.getHypothesisTemplate(), originalConfig.isMultiLabel(), - originalConfig.getLabels(), + originalConfig.getLabels().orElse(null), originalConfig.getResultsField() ), equalTo( diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/TermsEnumRequestTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/TermsEnumRequestTests.java index 634b04ac458ea..e824508a6c5c6 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/TermsEnumRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/TermsEnumRequestTests.java @@ -93,7 +93,7 @@ protected TermsEnumRequest doParseInstance(XContentParser parser) throws IOExcep @Override protected TermsEnumRequest mutateInstance(TermsEnumRequest instance) throws IOException { List> mutators = new ArrayList<>(); - mutators.add(request -> { request.field(randomAlphaOfLengthBetween(3, 10)); }); + mutators.add(request -> { request.field(randomValueOtherThan(request.field(), () -> randomAlphaOfLengthBetween(3, 10))); }); mutators.add(request -> { String[] indices = ArrayUtils.concat(instance.indices(), generateRandomStringArray(5, 10, false, false)); request.indices(indices); diff --git a/x-pack/plugin/eql/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/eql/20_runtime_mappings.yml b/x-pack/plugin/eql/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/eql/20_runtime_mappings.yml index 292f69e3d6bef..58462786f9a2f 100644 --- a/x-pack/plugin/eql/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/eql/20_runtime_mappings.yml +++ b/x-pack/plugin/eql/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/eql/20_runtime_mappings.yml @@ -19,7 +19,7 @@ setup: _id: "1" - event: - category: process - "@timestamp": 2020-02-03T12:34:56Z + "@timestamp": "2020-02-03T12:34:56Z" user: SYSTEM id: 123 valid: false @@ -29,7 +29,7 @@ setup: _id: "2" - event: - category: process - "@timestamp": 2020-02-04T12:34:56Z + "@timestamp": "2020-02-04T12:34:56Z" user: SYSTEM id: 123 valid: true @@ -39,7 +39,7 @@ setup: _id: "3" - event: - category: process - "@timestamp": 2020-02-05T12:34:56Z + "@timestamp": "2020-02-05T12:34:56Z" user: SYSTEM id: 123 valid: true @@ -49,7 +49,7 @@ setup: _id: "4" - event: - category: process - "@timestamp": 2020-02-05T12:34:57Z + "@timestamp": "2020-02-05T12:34:57Z" user: SYSTEM id: 123 diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/SequenceMatcher.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/SequenceMatcher.java index 550c65da64d3a..8510f8a2debd0 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/SequenceMatcher.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/SequenceMatcher.java @@ -82,7 +82,10 @@ public void clear() { private final Stats stats = new Stats(); private boolean headLimit = false; - private long totalRamBytesUsed = 0; + + // circuit breaker accounting + private long prevRamBytesUsedInFlight = 0; + private long prevRamBytesUsedCompleted = 0; @SuppressWarnings("rawtypes") public SequenceMatcher(int stages, boolean descending, TimeValue maxSpan, Limit limit, CircuitBreaker circuitBreaker) { @@ -114,9 +117,6 @@ private void trackSequence(Sequence sequence) { * Returns false if the process needs to be stopped. 
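Note: the prevRamBytesUsedInFlight and prevRamBytesUsedCompleted fields introduced above replace a single running total, so each call can charge the circuit breaker only with the change since the previous measurement. A simplified, standalone sketch of that accounting pattern (the LongConsumer stands in for the breaker; it is not the Elasticsearch CircuitBreaker interface):

import java.util.function.LongConsumer;

// Sketch of delta-based memory accounting: charge only the difference since the
// previous measurement, and release whatever is still accounted for on clear().
final class DeltaAccounting {
    private long previousBytes;

    void track(long currentBytes, LongConsumer breaker) {
        breaker.accept(currentBytes - previousBytes); // may be negative, i.e. releasing memory
        previousBytes = currentBytes;
    }

    void clear(LongConsumer breaker) {
        breaker.accept(-previousBytes);
        previousBytes = 0;
    }
}
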
*/ boolean match(int stage, Iterable> hits) { - long ramBytesUsedInFlight = ramBytesUsedInFlight(); - long ramBytesUsedCompleted = ramBytesUsedCompleted(); - for (Tuple tuple : hits) { KeyAndOrdinal ko = tuple.v1(); HitReference hit = tuple.v2(); @@ -145,7 +145,7 @@ boolean match(int stage, Iterable> hits) { log.trace("{}", stats); matched = true; } - trackMemory(ramBytesUsedInFlight, ramBytesUsedCompleted); + trackMemory(); return matched; } @@ -305,22 +305,20 @@ public void clear() { clearCircuitBreaker(); } - private long ramBytesUsedInFlight() { + // protected for testing purposes + protected long ramBytesUsedInFlight() { return RamUsageEstimator.sizeOf(keyToSequences) + RamUsageEstimator.sizeOf(stageToKeys); } - private long ramBytesUsedCompleted() { + // protected for testing purposes + protected long ramBytesUsedCompleted() { return RamUsageEstimator.sizeOfCollection(completed); } - private void addMemory(long bytes, String label) { - totalRamBytesUsed += bytes; - circuitBreaker.addEstimateBytesAndMaybeBreak(bytes, label); - } - private void clearCircuitBreaker() { - circuitBreaker.addWithoutBreaking(-totalRamBytesUsed); - totalRamBytesUsed = 0; + circuitBreaker.addWithoutBreaking(-prevRamBytesUsedInFlight - prevRamBytesUsedCompleted); + prevRamBytesUsedInFlight = 0; + prevRamBytesUsedCompleted = 0; } // The method is called at the end of match() which is called for every sub query in the sequence query @@ -328,11 +326,14 @@ private void clearCircuitBreaker() { // expensive, so we just calculate the difference in bytes of the total memory that the matcher's // structure occupy for the in-flight tracking of sequences, as well as for the list of completed // sequences. - private void trackMemory(long prevRamBytesUsedInflight, long prevRamBytesUsedCompleted) { - long bytesDiff = ramBytesUsedInFlight() - prevRamBytesUsedInflight; - addMemory(bytesDiff, CB_INFLIGHT_LABEL); - bytesDiff = ramBytesUsedCompleted() - prevRamBytesUsedCompleted; - addMemory(bytesDiff, CB_COMPLETED_LABEL); + private void trackMemory() { + long newRamBytesUsedInFlight = ramBytesUsedInFlight(); + circuitBreaker.addEstimateBytesAndMaybeBreak(newRamBytesUsedInFlight - prevRamBytesUsedInFlight, CB_INFLIGHT_LABEL); + prevRamBytesUsedInFlight = newRamBytesUsedInFlight; + + long newRamBytesUsedCompleted = ramBytesUsedCompleted(); + circuitBreaker.addEstimateBytesAndMaybeBreak(newRamBytesUsedCompleted - prevRamBytesUsedCompleted, CB_COMPLETED_LABEL); + prevRamBytesUsedCompleted = newRamBytesUsedCompleted; } @Override diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java index ef56f5c160604..7787f3e6ef171 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java @@ -132,27 +132,7 @@ public void fetchHits(Iterable> refs, ActionListener> criteria = new ArrayList<>(stages); - - for (int i = 0; i < stages; i++) { - final int j = i; - criteria.add( - new Criterion<>( - i, - new BoxedQueryRequest( - () -> SearchSourceBuilder.searchSource().size(10).query(matchAllQuery()).terminateAfter(j), - "@timestamp", - emptyList(), - emptySet() - ), - keyExtractors, - tsExtractor, - null, - implicitTbExtractor, - false - ) - ); - } + List> criteria = buildCriteria(stages); SequenceMatcher matcher = new 
SequenceMatcher(stages, false, TimeValue.MINUS_ONE, null, CIRCUIT_BREAKER); TumblingWindow window = new TumblingWindow(client, criteria, null, matcher); @@ -187,8 +167,10 @@ public void testCircuitBreakerSequenceMatcher() { assertEquals("sequence_inflight", e.getMessage()); // Break on second iteration - SequenceMatcher matcher2 = new SequenceMatcher(stages, false, TimeValue.MINUS_ONE, null, new EqlTestCircuitBreaker(15000)); + EqlTestCircuitBreaker breaker = new EqlTestCircuitBreaker(15000); + SequenceMatcher matcher2 = new SequenceMatcher(stages, false, TimeValue.MINUS_ONE, null, breaker); matcher2.match(0, hits); + assertEquals(matcher2.ramBytesUsedInFlight() + matcher2.ramBytesUsedCompleted(), breaker.ramBytesUsed); e = expectThrows(CircuitBreakingException.class, () -> matcher2.match(0, hits)); assertEquals("sequence_inflight", e.getMessage()); @@ -210,92 +192,18 @@ public void testMemoryClearedOnShardsException() { } private void assertMemoryCleared(int sequenceFiltersCount, BiFunction esClientSupplier) { - final int SEARCH_REQUESTS_EXPECTED_COUNT = 2; - List eqlBreakerSettings = Collections.singletonList( - new BreakerSettings( - CIRCUIT_BREAKER_NAME, - CIRCUIT_BREAKER_LIMIT, - CIRCUIT_BREAKER_OVERHEAD, - CircuitBreaker.Type.MEMORY, - CircuitBreaker.Durability.TRANSIENT - ) - ); + final int searchRequestsExpectedCount = 2; try ( CircuitBreakerService service = new HierarchyCircuitBreakerService( Settings.EMPTY, - eqlBreakerSettings, + breakerSettings(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) ); - ESMockClient esClient = esClientSupplier.apply(service.getBreaker(CIRCUIT_BREAKER_NAME), SEARCH_REQUESTS_EXPECTED_COUNT); + ESMockClient esClient = esClientSupplier.apply(service.getBreaker(CIRCUIT_BREAKER_NAME), searchRequestsExpectedCount); ) { CircuitBreaker eqlCircuitBreaker = service.getBreaker(CIRCUIT_BREAKER_NAME); - EqlConfiguration eqlConfiguration = new EqlConfiguration( - new String[] { "test" }, - org.elasticsearch.xpack.ql.util.DateUtils.UTC, - "nobody", - "cluster", - null, - emptyMap(), - null, - TimeValue.timeValueSeconds(30), - null, - 123, - "", - new TaskId("test", 123), - new EqlSearchTask( - randomLong(), - "transport", - EqlSearchAction.NAME, - "", - null, - emptyMap(), - emptyMap(), - new AsyncExecutionId("", new TaskId(randomAlphaOfLength(10), 1)), - TimeValue.timeValueDays(5) - ), - x -> Collections.emptySet() - ); - IndexResolver indexResolver = new IndexResolver( - esClient, - "cluster", - DefaultDataTypeRegistry.INSTANCE, - () -> { return emptySet(); } - ); - EqlSession eqlSession = new EqlSession( - esClient, - eqlConfiguration, - indexResolver, - new PreAnalyzer(), - new PostAnalyzer(), - new EqlFunctionRegistry(), - new Verifier(new Metrics()), - new Optimizer(), - new Planner(), - eqlCircuitBreaker - ); - QueryClient eqlClient = new PITAwareQueryClient(eqlSession); - List> criteria = new ArrayList<>(sequenceFiltersCount); - - for (int i = 0; i < sequenceFiltersCount; i++) { - final int j = i; - criteria.add( - new Criterion<>( - i, - new BoxedQueryRequest( - () -> SearchSourceBuilder.searchSource().size(10).query(matchAllQuery()).terminateAfter(j), - "@timestamp", - emptyList(), - emptySet() - ), - keyExtractors, - tsExtractor, - null, - implicitTbExtractor, - false - ) - ); - } - + QueryClient eqlClient = buildQueryClient(esClient, eqlCircuitBreaker); + List> criteria = buildCriteria(sequenceFiltersCount); SequenceMatcher matcher = new SequenceMatcher(sequenceFiltersCount, false, TimeValue.MINUS_ONE, null, 
eqlCircuitBreaker); TumblingWindow window = new TumblingWindow(eqlClient, criteria, null, matcher); window.execute(wrap(p -> {}, ex -> {})); @@ -306,6 +214,112 @@ private void assertMemoryCleared(int sequenceFiltersCount, BiFunction> criteria = buildCriteria(sequenceFiltersCount); + + SequenceMatcher matcher = new SequenceMatcher(sequenceFiltersCount, false, TimeValue.MINUS_ONE, null, eqlCircuitBreaker); + TumblingWindow window = new TumblingWindow(eqlClient, criteria, null, matcher); + window.execute(wrap(p -> fail(), ex -> assertTrue(ex instanceof CircuitBreakingException))); + } + } + + private List breakerSettings() { + List eqlBreakerSettings = Collections.singletonList( + new BreakerSettings( + CIRCUIT_BREAKER_NAME, + CIRCUIT_BREAKER_LIMIT, + CIRCUIT_BREAKER_OVERHEAD, + CircuitBreaker.Type.MEMORY, + CircuitBreaker.Durability.TRANSIENT + ) + ); + return eqlBreakerSettings; + } + + private List> buildCriteria(int sequenceFiltersCount) { + List> criteria = new ArrayList<>(sequenceFiltersCount); + for (int i = 0; i < sequenceFiltersCount; i++) { + final int j = i; + criteria.add( + new Criterion<>( + i, + new BoxedQueryRequest( + () -> SearchSourceBuilder.searchSource().size(10).query(matchAllQuery()).terminateAfter(j), + "@timestamp", + emptyList(), + emptySet() + ), + keyExtractors, + tsExtractor, + null, + implicitTbExtractor, + false + ) + ); + } + return criteria; + } + + private QueryClient buildQueryClient(ESMockClient esClient, CircuitBreaker eqlCircuitBreaker) { + EqlConfiguration eqlConfiguration = new EqlConfiguration( + new String[] { "test" }, + org.elasticsearch.xpack.ql.util.DateUtils.UTC, + "nobody", + "cluster", + null, + emptyMap(), + null, + TimeValue.timeValueSeconds(30), + null, + 123, + "", + new TaskId("test", 123), + new EqlSearchTask( + randomLong(), + "transport", + EqlSearchAction.NAME, + "", + null, + emptyMap(), + emptyMap(), + new AsyncExecutionId("", new TaskId(randomAlphaOfLength(10), 1)), + TimeValue.timeValueDays(5) + ), + x -> Collections.emptySet() + ); + IndexResolver indexResolver = new IndexResolver(esClient, "cluster", DefaultDataTypeRegistry.INSTANCE, Collections::emptySet); + EqlSession eqlSession = new EqlSession( + esClient, + eqlConfiguration, + indexResolver, + new PreAnalyzer(), + new PostAnalyzer(), + new EqlFunctionRegistry(), + new Verifier(new Metrics()), + new Optimizer(), + new Planner(), + eqlCircuitBreaker + ); + return new PITAwareQueryClient(eqlSession); + } + /** * A type of internal Node client that deals with three types of requests: open PIT, close PIT and SearchRequest. 
* This class is used by {@code CircuitBreakerTests#testMemoryClearedOnSuccessfulRequest()} and diff --git a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/RollupActionIT.java b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/RollupActionIT.java index 7bfe664e686d4..d329e713a3b0e 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/RollupActionIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/RollupActionIT.java @@ -146,6 +146,7 @@ public void testRollupIndex() throws Exception { String phaseName = randomFrom("warm", "cold"); createNewSingletonPolicy(client(), policy, phaseName, new RollupILMAction(ConfigTestHelpers.randomInterval())); updatePolicy(client(), index, policy); + updateClusterSettings(client(), Settings.builder().put("indices.lifecycle.poll_interval", "5s").build()); String rollupIndex = waitAndGetRollupIndexName(client(), index); assertNotNull("Cannot retrieve rollup index name", rollupIndex); diff --git a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/SearchableSnapshotActionIT.java b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/SearchableSnapshotActionIT.java index 849b00464f0fb..75b5ab9b0c3ab 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/SearchableSnapshotActionIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/SearchableSnapshotActionIT.java @@ -664,6 +664,9 @@ public void testSearchableSnapshotsInHotPhasePinnedToHotNodes() throws Exception Map hotIndexSettings = getIndexSettingsAsMap(restoredIndex); // searchable snapshots mounted in the hot phase should be pinned to hot nodes assertThat(hotIndexSettings.get(DataTier.TIER_PREFERENCE), is("data_hot")); + + assertOK(client().performRequest(new Request("DELETE", "_data_stream/" + dataStream))); + assertOK(client().performRequest(new Request("DELETE", "_ilm/policy/" + policy))); } // See: https://github.com/elastic/elasticsearch/issues/77269 diff --git a/x-pack/plugin/ml/build.gradle b/x-pack/plugin/ml/build.gradle index 155736a75e37d..86802009abde3 100644 --- a/x-pack/plugin/ml/build.gradle +++ b/x-pack/plugin/ml/build.gradle @@ -1,3 +1,5 @@ +import org.elasticsearch.gradle.VersionProperties + apply plugin: 'elasticsearch.internal-es-plugin' apply plugin: 'elasticsearch.internal-cluster-test' apply plugin: 'elasticsearch.internal-test-artifact' @@ -10,25 +12,39 @@ esplugin { extendedPlugins = ['x-pack-autoscaling', 'lang-painless'] } +def localRepo = providers.systemProperty('build.ml_cpp.repo').orNull repositories { exclusiveContent { + filter { + includeGroup 'org.elasticsearch.ml' + } forRepository { ivy { name "ml-cpp" - url providers.systemProperty('build.ml_cpp.repo').orElse('https://prelert-artifacts.s3.amazonaws.com').get() metadataSources { // no repository metadata, look directly for the artifact artifact() } - patternLayout { - artifact "maven/org/elasticsearch/ml/ml-cpp/[revision]/[module]-[revision](-[classifier]).[ext]" + if (localRepo) { + url localRepo + patternLayout { + artifact "maven/[orgPath]/[module]/[revision]/[module]-[revision](-[classifier]).[ext]" + } + } else { + url "https://artifacts-snapshot.elastic.co/" + patternLayout { + if (VersionProperties.isElasticsearchSnapshot()) { + artifact 
'/ml-cpp/[revision]/downloads/ml-cpp/[module]-[revision]-[classifier].[ext]' + } else { + // When building locally we always use snapshot artifacts even if passing `-Dbuild.snapshot=false`. + // Release builds are always done with a local repo. + artifact '/ml-cpp/[revision]-SNAPSHOT/downloads/ml-cpp/[module]-[revision]-SNAPSHOT-[classifier].[ext]' + } + } } } } - filter { - includeGroup 'org.elasticsearch.ml' - } } } diff --git a/x-pack/plugin/ml/licenses/lucene-analysis-icu-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 b/x-pack/plugin/ml/licenses/lucene-analysis-icu-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 deleted file mode 100644 index ceea8ba4f6855..0000000000000 --- a/x-pack/plugin/ml/licenses/lucene-analysis-icu-9.3.0-snapshot-b8d1fcfd0ec.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -501aa4f0028424a994b06627f30ffb36150ffbe2 \ No newline at end of file diff --git a/x-pack/plugin/ml/licenses/lucene-analysis-icu-9.3.0.jar.sha1 b/x-pack/plugin/ml/licenses/lucene-analysis-icu-9.3.0.jar.sha1 new file mode 100644 index 0000000000000..df4ae8d72dd2b --- /dev/null +++ b/x-pack/plugin/ml/licenses/lucene-analysis-icu-9.3.0.jar.sha1 @@ -0,0 +1 @@ +11dd9be0448fe594cf918f5260e193b3ab4e07a0 \ No newline at end of file diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/CategorizationIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/CategorizationIT.java index 9f33fc1d862e9..8aa53fce41c4d 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/CategorizationIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/CategorizationIT.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.ml.integration; -import org.apache.logging.log4j.LogManager; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; @@ -32,7 +31,6 @@ import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.CategorizerStats; import org.elasticsearch.xpack.core.ml.job.results.CategoryDefinition; import org.elasticsearch.xpack.core.ml.job.results.Result; -import org.elasticsearch.xpack.ml.MachineLearning; import org.junit.After; import org.junit.Before; @@ -342,58 +340,6 @@ public void testCategorizationStatePersistedOnSwitchToRealtime() throws Exceptio ); } - public void testCategorizationPerformance() { - // To compare Java/C++ tokenization performance: - // 1. Change false to true in this assumption - // 2. Run the test several times - // 3. Change MachineLearning.CATEGORIZATION_TOKENIZATION_IN_JAVA to false - // 4. Run the test several more times - // 5. Check the timings that get logged - // 6. Revert the changes to this assumption and MachineLearning.CATEGORIZATION_TOKENIZATION_IN_JAVA - assumeTrue("This is time consuming to run on every build - it should be run manually when comparing Java/C++ tokenization", false); - - int testBatchSize = 1000; - int testNumBatches = 1000; - String[] possibleMessages = new String[] { - " Source LOTS on 33080:817 has shut down.", - " P2PS failed to connect to the hrm server. " - + "Reason: Failed to connect to hrm server - No ACK from SIPC", - " Did not receive an image data for IDN_SELECTFEED:7630.T on 493. " - + "Recalling item. 
", - " " - + "RRCP STATUS MSG: RRCP_REBOOT: node 33191 has rebooted", - " Source PRISM_VOBr on 33069:757 has shut down.", - " Service PRISM_VOB has shut down." }; - - String jobId = "categorization-performance"; - Job.Builder job = newJobBuilder(jobId, Collections.emptyList(), false); - putJob(job); - openJob(job.getId()); - - long startTime = System.currentTimeMillis(); - - for (int batchNum = 0; batchNum < testNumBatches; ++batchNum) { - StringBuilder json = new StringBuilder(testBatchSize * 100); - for (int docNum = 0; docNum < testBatchSize; ++docNum) { - json.append( - String.format(Locale.ROOT, "{\"time\":1000000,\"msg\":\"%s\"}\n", possibleMessages[docNum % possibleMessages.length]) - ); - } - postData(jobId, json.toString()); - } - flushJob(jobId, false); - - long duration = System.currentTimeMillis() - startTime; - LogManager.getLogger(CategorizationIT.class) - .info( - "Performance test with tokenization in " - + (MachineLearning.CATEGORIZATION_TOKENIZATION_IN_JAVA ? "Java" : "C++") - + " took " - + duration - + "ms" - ); - } - public void testStopOnWarn() throws IOException { long testTime = System.currentTimeMillis(); diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java index 40eb8a77913b0..9a16b50d73235 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java @@ -812,6 +812,71 @@ public void testStartDeployment_TooManyAllocations() throws IOException { assertThat(EntityUtils.toString(response.getEntity()), not(containsString("deployment_stats"))); } + @SuppressWarnings("unchecked") + public void testStartDeployment_GivenNoProcessorsLeft_AndLazyStartEnabled() throws Exception { + // We start 2 models. The first needs so many allocations it won't possibly + // get them all. This would leave no space to allocate the second model at all. + + // Enable lazy starting so that the deployments start even if they cannot get fully allocated. + // The setting is cleared in the cleanup method of these tests. 
+ Request loggingSettings = new Request("PUT", "_cluster/settings"); + loggingSettings.setJsonEntity(""" + {"persistent" : { + "xpack.ml.max_lazy_ml_nodes": 5 + }}"""); + client().performRequest(loggingSettings); + + String modelId1 = "model_1"; + createTrainedModel(modelId1); + putModelDefinition(modelId1); + putVocabulary(List.of("these", "are", "my", "words"), modelId1); + + String modelId2 = "model_2"; + createTrainedModel(modelId2); + putModelDefinition(modelId2); + putVocabulary(List.of("these", "are", "my", "words"), modelId2); + + startDeployment(modelId1, AllocationStatus.State.STARTED.toString(), 100, 1); + + { + Request request = new Request( + "POST", + "/_ml/trained_models/" + + modelId2 + + "/deployment/_start?timeout=40s&wait_for=starting&" + + "number_of_allocations=4&threads_per_allocation=2&queue_capacity=500&cache_size=100Kb" + ); + client().performRequest(request); + } + + // Check second model did not get any allocations + assertAllocationCount(modelId2, 0); + + // Verify stats shows model is starting and deployment settings are present + { + Response statsResponse = getTrainedModelStats(modelId2); + var responseMap = entityAsMap(statsResponse); + List> stats = (List>) responseMap.get("trained_model_stats"); + assertThat(stats, hasSize(1)); + String statusState = (String) XContentMapValues.extractValue("deployment_stats.allocation_status.state", stats.get(0)); + assertThat(statusState, equalTo("starting")); + int numberOfAllocations = (int) XContentMapValues.extractValue("deployment_stats.number_of_allocations", stats.get(0)); + assertThat(numberOfAllocations, equalTo(4)); + int threadsPerAllocation = (int) XContentMapValues.extractValue("deployment_stats.threads_per_allocation", stats.get(0)); + assertThat(threadsPerAllocation, equalTo(2)); + int queueCapacity = (int) XContentMapValues.extractValue("deployment_stats.queue_capacity", stats.get(0)); + assertThat(queueCapacity, equalTo(500)); + ByteSizeValue cacheSize = ByteSizeValue.parseBytesSizeValue( + (String) XContentMapValues.extractValue("deployment_stats.cache_size", stats.get(0)), + "cache_size)" + ); + assertThat(cacheSize, equalTo(ByteSizeValue.ofKb(100))); + } + + stopDeployment(modelId1); + stopDeployment(modelId2); + } + @SuppressWarnings("unchecked") private void assertAllocationCount(String modelId, int expectedAllocationCount) throws IOException { Response response = getTrainedModelStats(modelId); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index 6f860f370962d..1973c43aac6e1 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -473,9 +473,6 @@ public class MachineLearning extends Plugin private static final long DEFAULT_MODEL_CIRCUIT_BREAKER_LIMIT = (long) ((0.50) * JvmInfo.jvmInfo().getMem().getHeapMax().getBytes()); private static final double DEFAULT_MODEL_CIRCUIT_BREAKER_OVERHEAD = 1.0D; - // This is for performance testing. It's not exposed to the end user. - // Recompile if you want to compare performance with C++ tokenization. 
- public static final boolean CATEGORIZATION_TOKENIZATION_IN_JAVA = true; public static final LicensedFeature.Persistent ML_ANOMALY_JOBS_FEATURE = LicensedFeature.persistent( MachineLearningField.ML_FEATURE_FAMILY, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteExpiredDataAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteExpiredDataAction.java index a6c39a971fd31..bdb01a2c70f06 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteExpiredDataAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteExpiredDataAction.java @@ -9,6 +9,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.ThreadedActionListener; @@ -137,17 +138,25 @@ protected void doExecute( if (Strings.isNullOrEmpty(request.getJobId()) || Strings.isAllOrWildcard(request.getJobId())) { List dataRemovers = createDataRemovers(client, taskId, anomalyDetectionAuditor); threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME) - .execute(() -> deleteExpiredData(request, dataRemovers, listener, isTimedOutSupplier)); + .execute(ActionRunnable.wrap(listener, l -> deleteExpiredData(request, dataRemovers, l, isTimedOutSupplier))); } else { - jobConfigProvider.expandJobs(request.getJobId(), false, true, null, ActionListener.wrap(jobBuilders -> { - threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME).execute(() -> { - List jobs = jobBuilders.stream().map(Job.Builder::build).collect(Collectors.toList()); - String[] jobIds = jobs.stream().map(Job::getId).toArray(String[]::new); - request.setExpandedJobIds(jobIds); - List dataRemovers = createDataRemovers(jobs, taskId, anomalyDetectionAuditor); - deleteExpiredData(request, dataRemovers, listener, isTimedOutSupplier); - }); - }, listener::onFailure)); + jobConfigProvider.expandJobs( + request.getJobId(), + false, + true, + null, + ActionListener.wrap( + jobBuilders -> threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME) + .execute(ActionRunnable.wrap(listener, l -> { + List jobs = jobBuilders.stream().map(Job.Builder::build).collect(Collectors.toList()); + String[] jobIds = jobs.stream().map(Job::getId).toArray(String[]::new); + request.setExpandedJobIds(jobIds); + List dataRemovers = createDataRemovers(jobs, taskId, anomalyDetectionAuditor); + deleteExpiredData(request, dataRemovers, l, isTimedOutSupplier); + })), + listener::onFailure + ) + ); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java index 0154fd6d7d5ba..d9d7f9a5a7150 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java @@ -269,7 +269,17 @@ static GetDeploymentStatsAction.Response addFailedRoutes( nodeStats.sort(Comparator.comparing(n -> n.getNode().getId())); - updatedAssignmentStats.add(new AssignmentStats(modelId, null, null, null, null, assignment.getStartTime(), nodeStats)); + updatedAssignmentStats.add( + 
new AssignmentStats( + modelId, + assignment.getTaskParams().getThreadsPerAllocation(), + assignment.getTaskParams().getNumberOfAllocations(), + assignment.getTaskParams().getQueueCapacity(), + assignment.getTaskParams().getCacheSize().orElse(null), + assignment.getStartTime(), + nodeStats + ) + ); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderService.java index 83cabd49c79c1..1e613d57b93e1 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderService.java @@ -507,7 +507,7 @@ public AutoscalingDeciderResult scale(Settings configuration, AutoscalingDecider "view of job memory is stale given duration [{}]. Not attempting to make scaling decision", mlMemoryTracker.getStalenessDuration() ); - return buildDecisionAndRequestRefresh(reasonBuilder); + return buildDecisionAndRequestRefresh(reasonBuilder.setSimpleReason(MEMORY_STALE)); } // We need the current node loads to determine if we need to scale up or down List nodeLoads = new ArrayList<>(mlNodes.size()); @@ -1163,7 +1163,7 @@ Optional calculateFutureAvailableCapacity(PersistentTasksC private AutoscalingDeciderResult buildDecisionAndRequestRefresh(MlScalingReason.Builder reasonBuilder) { mlMemoryTracker.asyncRefresh(); - return new AutoscalingDeciderResult(null, reasonBuilder.setSimpleReason(MEMORY_STALE).build()); + return new AutoscalingDeciderResult(null, reasonBuilder.build()); } private Long getAnalyticsMemoryRequirement(String analyticsId) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java index 1d48f1d1f2297..8c46427f6d249 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java @@ -225,14 +225,13 @@ void loadQueuedModels() { } catch (Exception ex) { logger.warn(() -> "[" + modelId + "] Start deployment failed", ex); if (ExceptionsHelper.unwrapCause(ex) instanceof ResourceNotFoundException) { - logger.warn(() -> "[" + modelId + "] Start deployment failed", ex); + logger.debug(() -> "[" + modelId + "] Start deployment failed as model was not found", ex); handleLoadFailure(loadingTask, ExceptionsHelper.missingTrainedModel(modelId, ex)); } else if (ExceptionsHelper.unwrapCause(ex) instanceof SearchPhaseExecutionException) { - logger.trace(() -> "[" + modelId + "] Start deployment failed, will retry", ex); + logger.debug(() -> "[" + modelId + "] Start deployment failed, will retry", ex); // A search phase execution failure should be retried, push task back to the queue loadingToRetry.add(loadingTask); } else { - logger.warn(() -> "[" + modelId + "] Start deployment failed", ex); handleLoadFailure(loadingTask, ex); } } @@ -413,7 +412,7 @@ private void updateNumberOfAllocations(TrainedModelAssignmentMetadata assignment for (TrainedModelAssignment assignment : modelsToUpdate) { TrainedModelDeploymentTask task = modelIdToTask.get(assignment.getModelId()); if (task == null) { - logger.debug(() -> format("[%s] task was removed whilst 
updating number of allocations", task.getModelId())); + logger.debug(() -> format("[%s] task was removed whilst updating number of allocations", assignment.getModelId())); continue; } RoutingInfo routingInfo = assignment.getNodeRoutingTable().get(nodeId); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java index d25623550b2e6..209a3a1fc73ab 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java @@ -111,7 +111,8 @@ AssignmentPlan computeAssignmentPlan() { assignment.getTaskParams().estimateMemoryUsageBytes(), assignment.getTaskParams().getNumberOfAllocations(), assignment.getTaskParams().getThreadsPerAllocation(), - currentAssignments + currentAssignments, + assignment.getMaxAssignedAllocations() ); }).forEach(planModels::add); modelToAdd.ifPresent( @@ -121,7 +122,8 @@ AssignmentPlan computeAssignmentPlan() { taskParams.estimateMemoryUsageBytes(), taskParams.getNumberOfAllocations(), taskParams.getThreadsPerAllocation(), - Map.of() + Map.of(), + 0 ) ) ); @@ -157,6 +159,7 @@ private TrainedModelAssignmentMetadata.Builder buildAssignmentsFromPlan(Assignme ); if (existingAssignment != null) { assignmentBuilder.setStartTime(existingAssignment.getStartTime()); + assignmentBuilder.setMaxAssignedAllocations(existingAssignment.getMaxAssignedAllocations()); } Map assignments = assignmentPlan.assignments(model).orElseGet(Map::of); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AbstractPreserveAllocations.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AbstractPreserveAllocations.java index 6aa71bafb4662..4aded2f295743 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AbstractPreserveAllocations.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AbstractPreserveAllocations.java @@ -57,7 +57,8 @@ Model modifyModelPreservingPreviousAssignments(Model m) { m.memoryBytes(), m.allocations() - calculatePreservedAllocations(m), m.threadsPerAllocation(), - calculateAllocationsPerNodeToPreserve(m) + calculateAllocationsPerNodeToPreserve(m), + m.maxAssignedAllocations() ); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlan.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlan.java index 24994b031d9ba..8dd1abc48309e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlan.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlan.java @@ -32,13 +32,18 @@ public record Model( long memoryBytes, int allocations, int threadsPerAllocation, - Map currentAllocationsByNodeId + Map currentAllocationsByNodeId, + int maxAssignedAllocations ) { - int getPreviouslyAssignedAllocations() { + int getCurrentAssignedAllocations() { return currentAllocationsByNodeId.values().stream().mapToInt(Integer::intValue).sum(); } + boolean hasEverBeenAllocated() { + return maxAssignedAllocations > 0; + } + @Override public 
String toString() { return id @@ -50,6 +55,8 @@ public String toString() { + threadsPerAllocation + ") (current_allocations = " + currentAllocationsByNodeId + + ") (max_assigned_allocations = " + + maxAssignedAllocations + ")"; } }; @@ -108,17 +115,17 @@ public int compareTo(AssignmentPlan o) { return Comparator.comparing(AssignmentPlan::computeQuality).compare(this, o); } - public boolean satisfiesPreviousAssignments() { - return models().stream().allMatch(this::isSatisfyingPreviousAssignmentsForModel); + public boolean satisfiesCurrentAssignments() { + return models().stream().allMatch(this::isSatisfyingCurrentAssignmentsForModel); } - private boolean isSatisfyingPreviousAssignmentsForModel(Model m) { + private boolean isSatisfyingCurrentAssignmentsForModel(Model m) { if (m.currentAllocationsByNodeId().isEmpty()) { return true; } Map nodeAssignments = assignments.get(m); int currentAllocations = nodeAssignments.values().stream().mapToInt(Integer::intValue).sum(); - return currentAllocations >= m.getPreviouslyAssignedAllocations(); + return currentAllocations >= m.getCurrentAssignedAllocations(); } public boolean satisfiesAllocations(Model m) { @@ -129,6 +136,21 @@ public boolean satisfiesAllModels() { return models().stream().allMatch(this::satisfiesAllocations); } + public boolean arePreviouslyAssignedModelsAssigned() { + return models().stream() + .filter(Model::hasEverBeenAllocated) + .map(this::totalAllocations) + .allMatch(totalAllocations -> totalAllocations > 0); + } + + public long countPreviouslyAssignedModelsThatAreStillAssigned() { + return models().stream() + .filter(Model::hasEverBeenAllocated) + .map(this::totalAllocations) + .filter(totalAllocations -> totalAllocations > 0) + .count(); + } + public int getRemainingNodeCores(String nodeId) { return remainingNodeCores.getOrDefault(nodeId, 0); } @@ -137,6 +159,13 @@ public long getRemainingNodeMemory(String nodeId) { return remainingNodeMemory.getOrDefault(nodeId, 0L); } + public int totalAllocations(Model m) { + if (assignments.containsKey(m) == false) { + return 0; + } + return assignments.get(m).values().stream().mapToInt(Integer::intValue).sum(); + } + private Quality computeQuality() { boolean isSatisfyingPreviousAssignments = true; double weighedAllocationsScore = 0; @@ -144,7 +173,7 @@ private Quality computeQuality() { for (Map.Entry> entry : assignments.entrySet()) { Model m = entry.getKey(); - isSatisfyingPreviousAssignments = isSatisfyingPreviousAssignments && isSatisfyingPreviousAssignmentsForModel(m); + isSatisfyingPreviousAssignments = isSatisfyingPreviousAssignments && isSatisfyingCurrentAssignmentsForModel(m); Map modelAssignments = entry.getValue(); if (modelAssignments != null) { for (Map.Entry nodeAllocations : modelAssignments.entrySet()) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanner.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanner.java index dd50ebdbd0f23..4c0812932dd09 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanner.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanner.java @@ -14,7 +14,11 @@ import org.elasticsearch.xpack.ml.inference.assignment.planning.AssignmentPlan.Node; import java.util.Comparator; +import java.util.HashMap; import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; import 
static org.elasticsearch.core.Strings.format; @@ -34,6 +38,9 @@ * Furthermore, the planner preserves at least one allocation for all existing * assignments. This way, the new plan will only have new assignments and the * transition can happen with minimal impact on performance of started deployments. + * However, if previously assigned models do not receive any allocation, then we + * attempt to find a solution that provides at least one allocation to + * previously assigned models. */ public class AssignmentPlanner { @@ -48,44 +55,122 @@ public AssignmentPlanner(List nodes, List models) { } public AssignmentPlan computePlan() { + return computePlan(true); + } + + private AssignmentPlan computePlan(boolean tryAssigningPreviouslyAssignedModels) { logger.debug(() -> format("Computing plan for nodes = %s; models = %s", nodes, models)); AssignmentPlan bestPlan; - // First solve preserving one allocation per assignment because that is most flexible - AssignmentPlan planKeepingOneAllocationOnPreviousAssignments = solveKeepingOneAllocationOnPreviousAssignments(); - if (planKeepingOneAllocationOnPreviousAssignments.satisfiesPreviousAssignments() == false) { - bestPlan = solvePreservingAllPreviousAssignments(); - } else if (planKeepingOneAllocationOnPreviousAssignments.satisfiesAllModels() == false) { - AssignmentPlan planKeepingAllAllocationsOnPreviousAssignments = solvePreservingAllPreviousAssignments(); - bestPlan = planKeepingAllAllocationsOnPreviousAssignments.compareTo(planKeepingOneAllocationOnPreviousAssignments) >= 0 - ? planKeepingAllAllocationsOnPreviousAssignments - : planKeepingOneAllocationOnPreviousAssignments; + AssignmentPlan planSatisfyingCurrentAssignments = solveSatisfyingCurrentAssignments(); + logger.debug(() -> "Plan satisfying current assignments =\n" + planSatisfyingCurrentAssignments.prettyPrint()); + if (planSatisfyingCurrentAssignments.arePreviouslyAssignedModelsAssigned() == false && tryAssigningPreviouslyAssignedModels) { + AssignmentPlan planAllocatingAtLeastOnceModelsThatWerePreviouslyAllocated = + solveAllocatingAtLeastOnceModelsThatWerePreviouslyAllocated(); + logger.debug( + () -> "Plan with at least one allocation for previously assigned models =\n" + + planAllocatingAtLeastOnceModelsThatWerePreviouslyAllocated.prettyPrint() + ); + if (planAllocatingAtLeastOnceModelsThatWerePreviouslyAllocated.arePreviouslyAssignedModelsAssigned()) { + bestPlan = planAllocatingAtLeastOnceModelsThatWerePreviouslyAllocated; + } else { + bestPlan = planSatisfyingCurrentAssignments + .countPreviouslyAssignedModelsThatAreStillAssigned() >= planAllocatingAtLeastOnceModelsThatWerePreviouslyAllocated + .countPreviouslyAssignedModelsThatAreStillAssigned() + ? 
planSatisfyingCurrentAssignments + : planAllocatingAtLeastOnceModelsThatWerePreviouslyAllocated; + } } else { - bestPlan = planKeepingOneAllocationOnPreviousAssignments; + bestPlan = planSatisfyingCurrentAssignments; } + logger.debug(() -> "Best plan =\n" + bestPlan.prettyPrint()); logger.debug(() -> prettyPrintOverallStats(bestPlan)); return bestPlan; } - private AssignmentPlan solveKeepingOneAllocationOnPreviousAssignments() { + private AssignmentPlan solveSatisfyingCurrentAssignments() { + AssignmentPlan bestPlan; + // First solve preserving one allocation per assignment because that is most flexible + AssignmentPlan planKeepingOneAllocationOnCurrentAssignments = solveKeepingOneAllocationOnCurrentAssignments(); + if (planKeepingOneAllocationOnCurrentAssignments.satisfiesCurrentAssignments() == false) { + bestPlan = solvePreservingAllAllocationsOnCurrentAssignments(); + } else if (planKeepingOneAllocationOnCurrentAssignments.satisfiesAllModels() == false) { + AssignmentPlan planKeepingAllAllocationsOnCurrentAssignments = solvePreservingAllAllocationsOnCurrentAssignments(); + bestPlan = planKeepingAllAllocationsOnCurrentAssignments.compareTo(planKeepingOneAllocationOnCurrentAssignments) >= 0 + ? planKeepingAllAllocationsOnCurrentAssignments + : planKeepingOneAllocationOnCurrentAssignments; + } else { + bestPlan = planKeepingOneAllocationOnCurrentAssignments; + } + return bestPlan; + } + + private AssignmentPlan solveAllocatingAtLeastOnceModelsThatWerePreviouslyAllocated() { + logger.debug(() -> "Attempting to solve assigning at least one allocations to previously assigned models"); + List previouslyAssignedModelsOnly = models.stream() + .filter(m -> m.hasEverBeenAllocated()) + .map( + m -> new Model( + m.id(), + m.memoryBytes(), + 1, + m.threadsPerAllocation(), + m.currentAllocationsByNodeId(), + m.maxAssignedAllocations() + ) + ) + .toList(); + AssignmentPlan planWithSingleAllocationForPreviouslyAssignedModels = new LinearProgrammingPlanSolver( + nodes, + previouslyAssignedModelsOnly + ).solvePlan(true); + + Map modelIdToNodeIdWithSingleAllocation = new HashMap<>(); + for (Model m : planWithSingleAllocationForPreviouslyAssignedModels.models()) { + Optional> assignments = planWithSingleAllocationForPreviouslyAssignedModels.assignments(m); + Set nodes = assignments.orElse(Map.of()).keySet(); + if (nodes.isEmpty() == false) { + assert nodes.size() == 1; + modelIdToNodeIdWithSingleAllocation.put(m.id(), nodes.iterator().next().id()); + } + } + + List planModels = models.stream().map(m -> { + Map currentAllocationsByNodeId = modelIdToNodeIdWithSingleAllocation.containsKey(m.id()) + ? Map.of(modelIdToNodeIdWithSingleAllocation.get(m.id()), 1) + : Map.of(); + return new Model( + m.id(), + m.memoryBytes(), + m.allocations(), + m.threadsPerAllocation(), + currentAllocationsByNodeId, + m.maxAssignedAllocations() + ); + }).toList(); + + return new AssignmentPlanner(nodes, planModels).computePlan(false); + } + + private AssignmentPlan solveKeepingOneAllocationOnCurrentAssignments() { // We do not want to ever completely unassign a model from a node so we // can move allocations without having temporary impact on performance. 
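// --- Illustrative aside, not part of the patch: a minimal sketch of how the fallback
// --- described in the planner javadoc above is expected to behave, written in the style
// --- of AssignmentPlannerTests and assuming the Node, Model and AssignmentPlanner
// --- signatures added in this change. The node size, model ids and allocation counts
// --- below are hypothetical.
// List<Node> nodes = List.of(new Node("n_1", ByteSizeValue.ofMb(1200).getBytes(), 2));
// List<Model> models = List.of(
//     // maxAssignedAllocations == 2 marks a model that has been allocated before,
//     // even though its current-allocations map is empty
//     new Model("m_previously_assigned", ByteSizeValue.ofMb(800).getBytes(), 2, 1, Map.of(), 2),
//     new Model("m_new", ByteSizeValue.ofMb(800).getBytes(), 2, 1, Map.of(), 0)
// );
// AssignmentPlan plan = new AssignmentPlanner(nodes, models).computePlan();
// // If the first pass leaves m_previously_assigned without allocations, computePlan()
// // retries with a bin-packing-only solve that reserves one allocation for previously
// // assigned models, then keeps whichever plan leaves more of them assigned, so here
// // plan.arePreviouslyAssignedModelsAssigned() is expected to be true.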
- logger.trace(() -> format("Solving preserving one allocation on previous assignments")); - return solvePreservingPreviousAssignments(new PreserveOneAllocation(nodes, models)); + logger.trace(() -> format("Solving preserving one allocation on current assignments")); + return solvePreservingCurrentAssignments(new PreserveOneAllocation(nodes, models)); } - private AssignmentPlan solvePreservingAllPreviousAssignments() { - logger.trace(() -> format("Solving preserving all allocations on previous assignments")); - return solvePreservingPreviousAssignments(new PreserveAllAllocations(nodes, models)); + private AssignmentPlan solvePreservingAllAllocationsOnCurrentAssignments() { + logger.trace(() -> format("Solving preserving all allocations on current assignments")); + return solvePreservingCurrentAssignments(new PreserveAllAllocations(nodes, models)); } - private AssignmentPlan solvePreservingPreviousAssignments(AbstractPreserveAllocations preserveAllocations) { + private AssignmentPlan solvePreservingCurrentAssignments(AbstractPreserveAllocations preserveAllocations) { List planNodes = preserveAllocations.nodesPreservingAllocations(); List planModels = preserveAllocations.modelsPreservingAllocations(); logger.trace(() -> format("Nodes after applying allocation preserving strategy = %s", planNodes)); logger.trace(() -> format("Models after applying allocation preserving strategy = %s", planModels)); - AssignmentPlan assignmentPlan = new LinearProgrammingPlanSolver(planNodes, planModels).solvePlan(); + AssignmentPlan assignmentPlan = new LinearProgrammingPlanSolver(planNodes, planModels).solvePlan(false); return preserveAllocations.mergePreservedAllocations(assignmentPlan); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/LinearProgrammingPlanSolver.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/LinearProgrammingPlanSolver.java index e798eb0b1db9c..61268946335e4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/LinearProgrammingPlanSolver.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/LinearProgrammingPlanSolver.java @@ -92,13 +92,17 @@ class LinearProgrammingPlanSolver { .collect(Collectors.toMap(Function.identity(), m -> m.memoryBytes() / (double) maxModelMemoryBytes)); } - AssignmentPlan solvePlan() { + AssignmentPlan solvePlan(boolean useBinPackingOnly) { if (models.isEmpty() || maxNodeCores == 0) { return AssignmentPlan.builder(nodes, models).build(); } Tuple, Double>, AssignmentPlan> weightsAndBinPackingPlan = calculateWeightsAndBinPackingPlan(); + if (useBinPackingOnly) { + return weightsAndBinPackingPlan.v2(); + } + Map, Double> allocationValues = new HashMap<>(); Map, Double> assignmentValues = new HashMap<>(); if (solveLinearProgram(weightsAndBinPackingPlan.v1(), allocationValues, assignmentValues) == false) { @@ -275,7 +279,7 @@ private boolean solveLinearProgram( // Each model should not get more allocations than is required. // Also, if the model has previous assignments, it should get at least as many allocations as it did before. 
model.addExpression("allocations_of_model_" + m.id() + "_not_more_than_required") - .lower(m.getPreviouslyAssignedAllocations()) + .lower(m.getCurrentAssignedAllocations()) .upper(m.allocations()) .setLinearFactorsSimple(varsForModel(m, allocationVars)); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java index 0d917debe3d02..4e6fe4fc0ca2e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java @@ -61,6 +61,7 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.function.Consumer; +import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; @@ -149,34 +150,46 @@ private void doStartDeployment(TrainedModelDeploymentTask task, ActionListener { - if (searchVocabResponse.getHits().getHits().length == 0) { - listener.onFailure( - new ResourceNotFoundException( - Messages.getMessage( - Messages.VOCABULARY_NOT_FOUND, - task.getModelId(), - VocabularyConfig.docId(modelConfig.getModelId()) + if (modelConfig.getInferenceConfig()instanceof NlpConfig nlpConfig) { + task.init(nlpConfig); + + SearchRequest searchRequest = vocabSearchRequest(nlpConfig.getVocabularyConfig(), modelConfig.getModelId()); + executeAsyncWithOrigin(client, ML_ORIGIN, SearchAction.INSTANCE, searchRequest, ActionListener.wrap(searchVocabResponse -> { + if (searchVocabResponse.getHits().getHits().length == 0) { + listener.onFailure( + new ResourceNotFoundException( + Messages.getMessage( + Messages.VOCABULARY_NOT_FOUND, + task.getModelId(), + VocabularyConfig.docId(modelConfig.getModelId()) + ) ) - ) + ); + return; + } + + Vocabulary vocabulary = parseVocabularyDocLeniently(searchVocabResponse.getHits().getAt(0)); + NlpTask nlpTask = new NlpTask(nlpConfig, vocabulary); + NlpTask.Processor processor = nlpTask.createProcessor(); + processContext.nlpTaskProcessor.set(processor); + // here, we are being called back on the searching thread, which MAY be a network thread + // `startAndLoad` creates named pipes, blocking the calling thread, better to execute that in our utility + // executor. + executorServiceForDeployment.execute( + () -> startAndLoad(processContext, modelConfig.getLocation(), modelLoadedListener) ); - return; - } - - Vocabulary vocabulary = parseVocabularyDocLeniently(searchVocabResponse.getHits().getAt(0)); - NlpTask nlpTask = new NlpTask(nlpConfig, vocabulary); - NlpTask.Processor processor = nlpTask.createProcessor(); - processContext.nlpTaskProcessor.set(processor); - // here, we are being called back on the searching thread, which MAY be a network thread - // `startAndLoad` creates named pipes, blocking the calling thread, better to execute that in our utility - // executor. 
- executorServiceForDeployment.execute(() -> startAndLoad(processContext, modelConfig.getLocation(), modelLoadedListener)); - }, listener::onFailure)); + }, listener::onFailure)); + } else { + listener.onFailure( + new IllegalArgumentException( + format( + "[%s] must be a pytorch model; found inference config of kind [%s]", + modelConfig.getModelId(), + modelConfig.getInferenceConfig().getWriteableName() + ) + ) + ); + } }, listener::onFailure); executeAsyncWithOrigin( @@ -206,7 +219,7 @@ Vocabulary parseVocabularyDocLeniently(SearchHit hit) throws IOException { stream ) ) { - return Vocabulary.createParser(true).apply(parser, null); + return Vocabulary.PARSER.apply(parser, null); } catch (IOException e) { logger.error(() -> "failed to parse trained model vocabulary [" + hit.getId() + "]", e); throw e; @@ -404,10 +417,12 @@ private Consumer onProcessCrash() { } void loadModel(TrainedModelLocation modelLocation, ActionListener listener) { - if (modelLocation instanceof IndexLocation) { - process.get().loadModel(task.getModelId(), ((IndexLocation) modelLocation).getIndexName(), stateStreamer, listener); + if (modelLocation instanceof IndexLocation indexLocation) { + process.get().loadModel(task.getModelId(), indexLocation.getIndexName(), stateStreamer, listener); } else { - throw new IllegalStateException("unsupported trained model location [" + modelLocation.getClass().getSimpleName() + "]"); + listener.onFailure( + new IllegalStateException("unsupported trained model location [" + modelLocation.getClass().getSimpleName() + "]") + ); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/Vocabulary.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/Vocabulary.java index 7665c61b76ce5..6deb9a8b6d0fb 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/Vocabulary.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/Vocabulary.java @@ -45,6 +45,8 @@ public static ConstructingObjectParser createParser(boolean ig return parser; } + public static ConstructingObjectParser PARSER = createParser(true); + private final List vocab; private final List merges; private final String modelId; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/ZeroShotClassificationProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/ZeroShotClassificationProcessor.java index e932df01604ad..6deca79272d1c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/ZeroShotClassificationProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/ZeroShotClassificationProcessor.java @@ -55,7 +55,7 @@ public class ZeroShotClassificationProcessor extends NlpTask.Processor { "zero_shot_classification requires [entailment] and [contradiction] in classification_labels" ); } - this.labels = Optional.ofNullable(config.getLabels()).orElse(List.of()).toArray(String[]::new); + this.labels = config.getLabels().orElse(List.of()).toArray(String[]::new); this.hypothesisTemplate = config.getHypothesisTemplate(); this.isMultiLabel = config.isMultiLabel(); this.resultsField = config.getResultsField(); @@ -70,7 +70,7 @@ public void validateInputs(List inputs) { public NlpTask.RequestBuilder getRequestBuilder(NlpConfig nlpConfig) { final String[] labelsValue; if (nlpConfig instanceof ZeroShotClassificationConfig zeroShotConfig) { - labelsValue = zeroShotConfig.getLabels().toArray(new String[0]); + 
labelsValue = zeroShotConfig.getLabels().orElse(List.of()).toArray(new String[0]); } else { labelsValue = this.labels; } @@ -86,7 +86,7 @@ public NlpTask.ResultProcessor getResultProcessor(NlpConfig nlpConfig) { final boolean isMultiLabelValue; final String resultsFieldValue; if (nlpConfig instanceof ZeroShotClassificationConfig zeroShotConfig) { - labelsValue = zeroShotConfig.getLabels().toArray(new String[0]); + labelsValue = zeroShotConfig.getLabels().orElse(List.of()).toArray(new String[0]); isMultiLabelValue = zeroShotConfig.isMultiLabel(); resultsFieldValue = zeroShotConfig.getResultsField(); } else { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/BasicTokenFilter.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/BasicTokenFilter.java index 8828efa4af1eb..3be4eded99894 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/BasicTokenFilter.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/BasicTokenFilter.java @@ -140,25 +140,30 @@ public boolean incrementToken() throws IOException { return false; } - void stripAccent() { + private void stripAccent() { accentBuffer.setLength(0); + boolean changed = false; if (normalizer.quickCheck(termAtt) != Normalizer.YES) { normalizer.normalize(termAtt, accentBuffer); + changed = true; + } else { + accentBuffer.append(termAtt); } List badIndices = new ArrayList<>(); List charCount = new ArrayList<>(); int index = 0; + int deletedIndices = 0; for (PrimitiveIterator.OfInt it = accentBuffer.codePoints().iterator(); it.hasNext();) { int cp = it.next(); if (Character.getType(cp) == Character.NON_SPACING_MARK) { - badIndices.add(index); + // When we iterate to delete accents, we need to account for previously deleted ones + badIndices.add(index - deletedIndices); charCount.add(Character.charCount(cp)); + deletedIndices++; + changed = true; } index++; } - if (badIndices.isEmpty()) { - return; - } for (int i = 0; i < badIndices.size(); i++) { int badIndex = badIndices.get(i); int count = charCount.get(i); @@ -166,12 +171,14 @@ void stripAccent() { accentBuffer.deleteCharAt(badIndex); } } - termAtt.setEmpty().append(accentBuffer); + if (changed) { + termAtt.setEmpty().append(accentBuffer); + } } private LinkedList split() { LinkedList splits = new LinkedList<>(); - int startOffset = offsetAtt.startOffset(); + final int startOffset = offsetAtt.startOffset(); int charIndex = 0; int lastCharSplit = 0; for (PrimitiveIterator.OfInt it = termAtt.codePoints().iterator(); it.hasNext();) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/ChunkedTrainedModelRestorer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/ChunkedTrainedModelRestorer.java index 40d0162e15911..2c440941b5224 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/ChunkedTrainedModelRestorer.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/ChunkedTrainedModelRestorer.java @@ -10,11 +10,11 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchRequestBuilder; +import 
org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.core.CheckedFunction; @@ -38,8 +38,10 @@ import java.util.concurrent.ExecutorService; import java.util.function.Consumer; +import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; -import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; +import static org.elasticsearch.xpack.ml.MachineLearning.NATIVE_INFERENCE_COMMS_THREAD_POOL_NAME; +import static org.elasticsearch.xpack.ml.MachineLearning.UTILITY_THREAD_POOL_NAME; /** * Searches for and emits {@link TrainedModelDefinitionDoc}s in @@ -71,7 +73,7 @@ public ChunkedTrainedModelRestorer( ExecutorService executorService, NamedXContentRegistry xContentRegistry ) { - this.client = client; + this.client = new OriginSettingClient(client, ML_ORIGIN); this.executorService = executorService; this.xContentRegistry = xContentRegistry; this.modelId = modelId; @@ -122,7 +124,6 @@ public void restoreModelDefinition( logger.debug("[{}] restoring model", modelId); SearchRequest searchRequest = buildSearch(client, modelId, index, searchSize, null); - executorService.execute(() -> doSearch(searchRequest, modelConsumer, successConsumer, errorConsumer)); } @@ -132,8 +133,16 @@ private void doSearch( Consumer successConsumer, Consumer errorConsumer ) { - - executeAsyncWithOrigin(client, ML_ORIGIN, SearchAction.INSTANCE, searchRequest, ActionListener.wrap(searchResponse -> { + try { + assert Thread.currentThread().getName().contains(NATIVE_INFERENCE_COMMS_THREAD_POOL_NAME) + || Thread.currentThread().getName().contains(UTILITY_THREAD_POOL_NAME) + : format( + "Must execute from [%s] or [%s] but thread is [%s]", + NATIVE_INFERENCE_COMMS_THREAD_POOL_NAME, + UTILITY_THREAD_POOL_NAME, + Thread.currentThread().getName() + ); + SearchResponse searchResponse = client.search(searchRequest).actionGet(); if (searchResponse.getHits().getHits().length == 0) { errorConsumer.accept(new ResourceNotFoundException(Messages.getMessage(Messages.MODEL_DEFINITION_NOT_FOUND, modelId))); return; @@ -182,13 +191,13 @@ private void doSearch( searchRequestBuilder.searchAfter(new Object[] { lastHit.getIndex(), lastNum }); executorService.execute(() -> doSearch(searchRequestBuilder.request(), modelConsumer, successConsumer, errorConsumer)); } - }, e -> { + } catch (Exception e) { if (ExceptionsHelper.unwrapCause(e) instanceof ResourceNotFoundException) { errorConsumer.accept(new ResourceNotFoundException(Messages.getMessage(Messages.MODEL_DEFINITION_NOT_FOUND, modelId))); } else { errorConsumer.accept(e); } - })); + } } private static SearchRequestBuilder buildSearchBuilder(Client client, String modelId, String index, int searchSize) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java index dfcefd7560f29..5c188fa4a5930 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java @@ -27,7 +27,6 @@ import 
org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.TimingStats; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; -import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.job.categorization.CategorizationAnalyzer; import org.elasticsearch.xpack.ml.job.persistence.StateStreamer; import org.elasticsearch.xpack.ml.job.process.CountingInputStream; @@ -88,8 +87,7 @@ public class AutodetectCommunicator implements Closeable { this.onFinishHandler = onFinishHandler; this.xContentRegistry = xContentRegistry; this.autodetectWorkerExecutor = autodetectWorkerExecutor; - this.includeTokensField = MachineLearning.CATEGORIZATION_TOKENIZATION_IN_JAVA - && job.getAnalysisConfig().getCategorizationFieldName() != null; + this.includeTokensField = job.getAnalysisConfig().getCategorizationFieldName() != null; } public void restoreState(ModelSnapshot modelSnapshot) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/JobModelSnapshotUpgrader.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/JobModelSnapshotUpgrader.java index 100a165895e16..9cb1df0baddef 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/JobModelSnapshotUpgrader.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/JobModelSnapshotUpgrader.java @@ -213,7 +213,7 @@ protected final Map outputFieldIndexes() { } } // field for categorization tokens - if (MachineLearning.CATEGORIZATION_TOKENIZATION_IN_JAVA && job.getAnalysisConfig().getCategorizationFieldName() != null) { + if (job.getAnalysisConfig().getCategorizationFieldName() != null) { fieldIndexes.put(LengthEncodedWriter.PRETOKENISED_TOKEN_FIELD, index++); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessFactory.java index 3b837292e11c3..c25cac48b27d5 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessFactory.java @@ -96,8 +96,7 @@ public AutodetectProcess createAutodetectProcess( true ); createNativeProcess(job, params, processPipes, filesToDelete); - boolean includeTokensField = MachineLearning.CATEGORIZATION_TOKENIZATION_IN_JAVA - && job.getAnalysisConfig().getCategorizationFieldName() != null; + boolean includeTokensField = job.getAnalysisConfig().getCategorizationFieldName() != null; // The extra 1 is the control field int numberOfFields = job.allInputFields().size() + (includeTokensField ? 
1 : 0) + 1; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/AbstractDataToProcessWriter.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/AbstractDataToProcessWriter.java index d2a1cea439c1b..4b2fb3deacc6d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/AbstractDataToProcessWriter.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/AbstractDataToProcessWriter.java @@ -56,6 +56,8 @@ public abstract class AbstractDataToProcessWriter implements DataToProcessWriter private long latestEpochMs; private long latestEpochMsThisUpload; + private Set termFields; + protected AbstractDataToProcessWriter( boolean includeControlField, boolean includeTokensField, @@ -74,6 +76,7 @@ protected AbstractDataToProcessWriter( this.logger = Objects.requireNonNull(logger); this.latencySeconds = analysisConfig.getLatency() == null ? 0 : analysisConfig.getLatency().seconds(); this.bucketSpanMs = analysisConfig.getBucketSpan().getMillis(); + this.termFields = analysisConfig.termFields(); Date date = dataCountsReporter.getLatestRecordTime(); latestEpochMsThisUpload = 0; @@ -90,6 +93,13 @@ protected AbstractDataToProcessWriter( } } + public String maybeTruncateCatgeorizationField(String categorizationField) { + if (termFields.contains(analysisConfig.getCategorizationFieldName()) == false) { + return categorizationField.substring(0, Math.min(categorizationField.length(), AnalysisConfig.MAX_CATEGORIZATION_FIELD_LENGTH)); + } + return categorizationField; + } + /** * Set up the field index mappings. This must be called before * {@linkplain DataToProcessWriter#write(InputStream, CategorizationAnalyzer, XContentType, BiConsumer)} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/JsonDataToProcessWriter.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/JsonDataToProcessWriter.java index c803b9ad19be4..6bfc53087feac 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/JsonDataToProcessWriter.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/JsonDataToProcessWriter.java @@ -154,12 +154,17 @@ private void writeJson(CategorizationAnalyzer categorizationAnalyzer, XContentPa for (InputOutputMap inOut : inputOutputMap) { String field = input[inOut.inputIndex]; - record[inOut.outputIndex] = (field == null) ? "" : field; + field = (field == null) ? 
"" : field; + if (categorizationFieldIndex != null && inOut.inputIndex == categorizationFieldIndex) { + field = maybeTruncateCatgeorizationField(field); + } + record[inOut.outputIndex] = field; } if (categorizationAnalyzer != null && categorizationFieldIndex != null) { tokenizeForCategorization(categorizationAnalyzer, input[categorizationFieldIndex], record); } + transformTimeAndWrite(record, inputFieldCount); inputFieldCount = recordReader.read(input, gotFields); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancerTests.java index 942c4624b25c4..0df7073f2a8ff 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancerTests.java @@ -418,7 +418,7 @@ public void testRebalance_GivenPreviousAssignments_AndRemovedNode_AndRemainingNo assertThat(assignment.getNodeRoutingTable(), is(aMapWithSize(1))); assertThat(assignment.getNodeRoutingTable(), hasKey("node-1")); assertThat(assignment.getNodeRoutingTable().get("node-1").getCurrentAllocations(), equalTo(2)); - assertThat(assignment.getNodeRoutingTable().get("node-1").getTargetAllocations(), equalTo(2)); + assertThat(assignment.getNodeRoutingTable().get("node-1").getTargetAllocations(), equalTo(1)); assertThat(assignment.getNodeRoutingTable().get("node-1").getState(), equalTo(RoutingState.STARTED)); assertThat(assignment.getReason().isPresent(), is(true)); assertThat( @@ -433,7 +433,11 @@ public void testRebalance_GivenPreviousAssignments_AndRemovedNode_AndRemainingNo TrainedModelAssignment assignment = result.getModelAssignment(previousModel2Id); assertThat(assignment, is(notNullValue())); assertThat(assignment.getAssignmentState(), equalTo(AssignmentState.STARTING)); - assertThat(assignment.getNodeRoutingTable(), is(anEmptyMap())); + assertThat(assignment.getNodeRoutingTable(), is(aMapWithSize(1))); + assertThat(assignment.getNodeRoutingTable(), hasKey("node-1")); + assertThat(assignment.getNodeRoutingTable().get("node-1").getCurrentAllocations(), equalTo(2)); + assertThat(assignment.getNodeRoutingTable().get("node-1").getTargetAllocations(), equalTo(2)); + assertThat(assignment.getNodeRoutingTable().get("node-1").getState(), equalTo(RoutingState.STARTING)); assertThat(assignment.getReason().isPresent(), is(true)); assertThat( assignment.getReason().get(), diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanTests.java index 9648339139801..823fa139c52da 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanTests.java @@ -24,21 +24,21 @@ public class AssignmentPlanTests extends ESTestCase { public void testBuilderCtor_GivenDuplicateNode() { Node n = new Node("n_1", 100, 4); - Model m = new Model("m_1", 40, 1, 2, Map.of()); + Model m = new Model("m_1", 40, 1, 2, Map.of(), 0); expectThrows(IllegalArgumentException.class, () -> AssignmentPlan.builder(List.of(n, n), List.of(m))); } public void 
testBuilderCtor_GivenDuplicateModel() { Node n = new Node("n_1", 100, 4); - Model m = new Model("m_1", 40, 1, 2, Map.of()); + Model m = new Model("m_1", 40, 1, 2, Map.of(), 0); expectThrows(IllegalArgumentException.class, () -> AssignmentPlan.builder(List.of(n), List.of(m, m))); } public void testAssignModelToNode_GivenNoPreviousAssignment() { Node n = new Node("n_1", 100, 4); - Model m = new Model("m_1", 40, 1, 2, Map.of()); + Model m = new Model("m_1", 40, 1, 2, Map.of(), 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); @@ -57,13 +57,13 @@ public void testAssignModelToNode_GivenNoPreviousAssignment() { AssignmentPlan plan = builder.build(); assertThat(plan.models(), contains(m)); - assertThat(plan.satisfiesPreviousAssignments(), is(true)); + assertThat(plan.satisfiesCurrentAssignments(), is(true)); assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 1))); } - public void testAssignModelToNode_GivenNewPlanSatisfiesPreviousAssignment() { + public void testAssignModelToNode_GivenNewPlanSatisfiesCurrentAssignment() { Node n = new Node("n_1", 100, 4); - Model m = new Model("m_1", 40, 2, 2, Map.of("n_1", 1)); + Model m = new Model("m_1", 40, 2, 2, Map.of("n_1", 1), 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); @@ -77,13 +77,13 @@ public void testAssignModelToNode_GivenNewPlanSatisfiesPreviousAssignment() { AssignmentPlan plan = builder.build(); assertThat(plan.models(), contains(m)); - assertThat(plan.satisfiesPreviousAssignments(), is(true)); + assertThat(plan.satisfiesCurrentAssignments(), is(true)); assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 1))); } - public void testAssignModelToNode_GivenNewPlanDoesNotSatisfyPreviousAssignment() { + public void testAssignModelToNode_GivenNewPlanDoesNotSatisfyCurrentAssignment() { Node n = new Node("n_1", 100, 4); - Model m = new Model("m_1", 40, 2, 2, Map.of("n_1", 2)); + Model m = new Model("m_1", 40, 2, 2, Map.of("n_1", 2), 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); @@ -97,13 +97,13 @@ public void testAssignModelToNode_GivenNewPlanDoesNotSatisfyPreviousAssignment() AssignmentPlan plan = builder.build(); assertThat(plan.models(), contains(m)); - assertThat(plan.satisfiesPreviousAssignments(), is(false)); + assertThat(plan.satisfiesCurrentAssignments(), is(false)); assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 1))); } public void testAssignModelToNode_GivenPreviouslyUnassignedModelDoesNotFit() { Node n = new Node("n_1", 100, 4); - Model m = new Model("m_1", 101, 2, 2, Map.of()); + Model m = new Model("m_1", 101, 2, 2, Map.of(), 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); Exception e = expectThrows(IllegalArgumentException.class, () -> builder.assignModelToNode(m, n, 1)); @@ -113,20 +113,20 @@ public void testAssignModelToNode_GivenPreviouslyUnassignedModelDoesNotFit() { public void testAssignModelToNode_GivenPreviouslyAssignedModelDoesNotFit() { Node n = new Node("n_1", 100, 4); - Model m = new Model("m_1", 101, 2, 2, Map.of("n_1", 1)); + Model m = new Model("m_1", 101, 2, 2, Map.of("n_1", 1), 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); builder.assignModelToNode(m, n, 2); AssignmentPlan plan = builder.build(); assertThat(plan.models(), contains(m)); - assertThat(plan.satisfiesPreviousAssignments(), is(true)); + assertThat(plan.satisfiesCurrentAssignments(), is(true)); assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 2))); } 
public void testAssignModelToNode_GivenNotEnoughCores_AndSingleThreadPerAllocation() { Node n = new Node("n_1", 100, 4); - Model m = new Model("m_1", 100, 5, 1, Map.of()); + Model m = new Model("m_1", 100, 5, 1, Map.of(), 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); Exception e = expectThrows(IllegalArgumentException.class, () -> builder.assignModelToNode(m, n, 5)); @@ -139,7 +139,7 @@ public void testAssignModelToNode_GivenNotEnoughCores_AndSingleThreadPerAllocati public void testAssignModelToNode_GivenNotEnoughCores_AndMultipleThreadsPerAllocation() { Node n = new Node("n_1", 100, 5); - Model m = new Model("m_1", 100, 3, 2, Map.of()); + Model m = new Model("m_1", 100, 3, 2, Map.of(), 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); Exception e = expectThrows(IllegalArgumentException.class, () -> builder.assignModelToNode(m, n, 3)); @@ -152,7 +152,7 @@ public void testAssignModelToNode_GivenNotEnoughCores_AndMultipleThreadsPerAlloc public void testAssignModelToNode_GivenSameModelAssignedTwice() { Node n = new Node("n_1", 100, 8); - Model m = new Model("m_1", 60, 4, 2, Map.of()); + Model m = new Model("m_1", 60, 4, 2, Map.of(), 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); @@ -180,13 +180,13 @@ public void testAssignModelToNode_GivenSameModelAssignedTwice() { AssignmentPlan plan = builder.build(); assertThat(plan.models(), contains(m)); - assertThat(plan.satisfiesPreviousAssignments(), is(true)); + assertThat(plan.satisfiesCurrentAssignments(), is(true)); assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 3))); } public void testCanAssign_GivenPreviouslyUnassignedModelDoesNotFit() { Node n = new Node("n_1", 100, 5); - Model m = new Model("m_1", 101, 1, 1, Map.of()); + Model m = new Model("m_1", 101, 1, 1, Map.of(), 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); @@ -195,7 +195,7 @@ public void testCanAssign_GivenPreviouslyUnassignedModelDoesNotFit() { public void testCanAssign_GivenPreviouslyAssignedModelDoesNotFit() { Node n = new Node("n_1", 100, 5); - Model m = new Model("m_1", 101, 1, 1, Map.of("n_1", 1)); + Model m = new Model("m_1", 101, 1, 1, Map.of("n_1", 1), 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); @@ -204,7 +204,7 @@ public void testCanAssign_GivenPreviouslyAssignedModelDoesNotFit() { public void testCanAssign_GivenEnoughMemory() { Node n = new Node("n_1", 100, 5); - Model m = new Model("m_1", 100, 3, 2, Map.of()); + Model m = new Model("m_1", 100, 3, 2, Map.of(), 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); @@ -219,13 +219,13 @@ public void testCompareTo_GivenDifferenceInPreviousAssignments() { Node n = new Node("n_1", 100, 5); { - Model m = new Model("m_1", 100, 3, 2, Map.of("n_1", 2)); + Model m = new Model("m_1", 100, 3, 2, Map.of("n_1", 2), 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); builder.assignModelToNode(m, n, 2); planSatisfyingPreviousAssignments = builder.build(); } { - Model m = new Model("m_1", 100, 3, 2, Map.of("n_1", 3)); + Model m = new Model("m_1", 100, 3, 2, Map.of("n_1", 3), 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); builder.assignModelToNode(m, n, 2); planNotSatisfyingPreviousAssignments = builder.build(); @@ -239,7 +239,7 @@ public void testCompareTo_GivenDifferenceInAllocations() { AssignmentPlan planWithMoreAllocations; AssignmentPlan 
planWithFewerAllocations; Node n = new Node("n_1", 100, 5); - Model m = new Model("m_1", 100, 3, 2, Map.of("n_1", 1)); + Model m = new Model("m_1", 100, 3, 2, Map.of("n_1", 1), 0); { AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); @@ -262,13 +262,13 @@ public void testCompareTo_GivenDifferenceInMemory() { Node n = new Node("n_1", 100, 5); { - Model m = new Model("m_1", 100, 3, 2, Map.of("n_1", 1)); + Model m = new Model("m_1", 100, 3, 2, Map.of("n_1", 1), 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); builder.assignModelToNode(m, n, 2); planUsingMoreMemory = builder.build(); } { - Model m = new Model("m_1", 99, 3, 2, Map.of("n_1", 1)); + Model m = new Model("m_1", 99, 3, 2, Map.of("n_1", 1), 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); builder.assignModelToNode(m, n, 2); planUsingLessMemory = builder.build(); @@ -281,9 +281,9 @@ public void testCompareTo_GivenDifferenceInMemory() { public void testSatisfiesAllModels_GivenAllModelsAreSatisfied() { Node node1 = new Node("n_1", 100, 4); Node node2 = new Node("n_2", 100, 4); - Model model1 = new Model("m_1", 50, 1, 2, Map.of()); - Model model2 = new Model("m_2", 30, 2, 1, Map.of()); - Model model3 = new Model("m_3", 20, 4, 1, Map.of()); + Model model1 = new Model("m_1", 50, 1, 2, Map.of(), 0); + Model model2 = new Model("m_2", 30, 2, 1, Map.of(), 0); + Model model3 = new Model("m_3", 20, 4, 1, Map.of(), 0); AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(model1, model2, model3)) .assignModelToNode(model1, node1, 1) .assignModelToNode(model2, node2, 2) @@ -296,9 +296,9 @@ public void testSatisfiesAllModels_GivenAllModelsAreSatisfied() { public void testSatisfiesAllModels_GivenOneModelHasOneAllocationLess() { Node node1 = new Node("n_1", 100, 4); Node node2 = new Node("n_2", 100, 4); - Model model1 = new Model("m_1", 50, 1, 2, Map.of()); - Model model2 = new Model("m_2", 30, 2, 1, Map.of()); - Model model3 = new Model("m_3", 20, 4, 1, Map.of()); + Model model1 = new Model("m_1", 50, 1, 2, Map.of(), 0); + Model model2 = new Model("m_2", 30, 2, 1, Map.of(), 0); + Model model3 = new Model("m_3", 20, 4, 1, Map.of(), 0); AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(model1, model2, model3)) .assignModelToNode(model1, node1, 1) .assignModelToNode(model2, node2, 2) @@ -307,4 +307,42 @@ public void testSatisfiesAllModels_GivenOneModelHasOneAllocationLess() { .build(); assertThat(plan.satisfiesAllModels(), is(false)); } + + public void testArePreviouslyAssignedModelsAssigned_GivenTrue() { + Node node1 = new Node("n_1", 100, 4); + Node node2 = new Node("n_2", 100, 4); + Model model1 = new Model("m_1", 50, 1, 2, Map.of(), 3); + Model model2 = new Model("m_2", 30, 2, 1, Map.of(), 4); + Model model3 = new Model("m_3", 20, 4, 1, Map.of(), 0); + AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(model1, model2, model3)) + .assignModelToNode(model1, node1, 1) + .assignModelToNode(model2, node2, 1) + .build(); + assertThat(plan.arePreviouslyAssignedModelsAssigned(), is(true)); + } + + public void testArePreviouslyAssignedModelsAssigned_GivenFalse() { + Node node1 = new Node("n_1", 100, 4); + Node node2 = new Node("n_2", 100, 4); + Model model1 = new Model("m_1", 50, 1, 2, Map.of(), 3); + Model model2 = new Model("m_2", 30, 2, 1, Map.of(), 4); + AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(model1, model2)) + .assignModelToNode(model1, node1, 
1) + .build(); + assertThat(plan.arePreviouslyAssignedModelsAssigned(), is(false)); + } + + public void testCountPreviouslyAssignedThatAreStillAssigned() { + Node node1 = new Node("n_1", 100, 4); + Node node2 = new Node("n_2", 100, 4); + Model model1 = new Model("m_1", 50, 1, 2, Map.of(), 3); + Model model2 = new Model("m_2", 30, 2, 1, Map.of(), 4); + Model model3 = new Model("m_3", 20, 4, 1, Map.of(), 1); + Model model4 = new Model("m_4", 20, 4, 1, Map.of(), 0); + AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(model1, model2, model3, model4)) + .assignModelToNode(model1, node1, 1) + .assignModelToNode(model2, node2, 1) + .build(); + assertThat(plan.countPreviouslyAssignedModelsThatAreStillAssigned(), equalTo(2L)); + } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlannerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlannerTests.java index 3db1989d03bd0..3ec3f8cece8eb 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlannerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlannerTests.java @@ -14,16 +14,19 @@ import org.elasticsearch.xpack.ml.inference.assignment.planning.AssignmentPlan.Node; import java.util.ArrayList; +import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasItems; import static org.hamcrest.Matchers.in; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; @@ -32,14 +35,14 @@ public class AssignmentPlannerTests extends ESTestCase { public void testModelThatDoesNotFitInMemory() { List nodes = List.of(new Node("n_1", 100, 4)); - Model model = new Model("m_1", 101, 4, 1, Map.of()); + Model model = new Model("m_1", 101, 4, 1, Map.of(), 0); AssignmentPlan plan = new AssignmentPlanner(nodes, List.of(model)).computePlan(); assertThat(plan.assignments(model).isEmpty(), is(true)); } public void testModelWithThreadsPerAllocationNotFittingOnAnyNode() { List nodes = List.of(new Node("n_1", 100, 4), new Node("n_2", 100, 5)); - Model model = new Model("m_1", 1, 1, 6, Map.of()); + Model model = new Model("m_1", 1, 1, 6, Map.of(), 0); AssignmentPlan plan = new AssignmentPlanner(nodes, List.of(model)).computePlan(); assertThat(plan.assignments(model).isEmpty(), is(true)); } @@ -47,19 +50,19 @@ public void testModelWithThreadsPerAllocationNotFittingOnAnyNode() { public void testSingleModelThatFitsFullyOnSingleNode() { { Node node = new Node("n_1", 100, 4); - Model model = new Model("m_1", 100, 1, 1, Map.of()); + Model model = new Model("m_1", 100, 1, 1, Map.of(), 0); AssignmentPlan plan = new AssignmentPlanner(List.of(node), List.of(model)).computePlan(); assertModelFullyAssignedToNode(plan, model, node); } { Node node = new Node("n_1", 1000, 8); - Model model = new Model("m_1", 1000, 8, 1, Map.of()); + Model model = new Model("m_1", 1000, 8, 1, Map.of(), 0); AssignmentPlan plan = new AssignmentPlanner(List.of(node), List.of(model)).computePlan(); assertModelFullyAssignedToNode(plan, model, node); } { Node node = new Node("n_1", 
10000, 16); - Model model = new Model("m_1", 10000, 1, 16, Map.of()); + Model model = new Model("m_1", 10000, 1, 16, Map.of(), 0); AssignmentPlan plan = new AssignmentPlanner(List.of(node), List.of(model)).computePlan(); assertModelFullyAssignedToNode(plan, model, node); } @@ -68,7 +71,7 @@ public void testSingleModelThatFitsFullyOnSingleNode() { public void testSingleModelThatFitsFullyOnSingleNode_GivenTwoNodes_ShouldBeFullyAssignedOnOneNode() { Node node1 = new Node("n_1", 100, 4); Node node2 = new Node("n_2", 100, 4); - Model model = new Model("m_1", 100, 4, 1, Map.of()); + Model model = new Model("m_1", 100, 4, 1, Map.of(), 0); AssignmentPlan plan = new AssignmentPlanner(List.of(node1, node2), List.of(model)).computePlan(); @@ -81,7 +84,7 @@ public void testSingleModelThatFitsFullyOnSingleNode_GivenTwoNodes_ShouldBeFully } public void testModelWithMoreAllocationsThanAvailableCores_GivenSingleThreadPerAllocation() { - Model model = new Model("m_1", 30, 10, 1, Map.of()); + Model model = new Model("m_1", 30, 10, 1, Map.of(), 0); // Single node { Node node = new Node("n_1", 100, 4); @@ -119,10 +122,10 @@ public void testMultipleModelsAndNodesWithSingleSolution() { Node node2 = new Node("n_2", 100, 7); Node node3 = new Node("n_3", 100, 2); Node node4 = new Node("n_4", 100, 2); - Model model1 = new Model("m_1", 50, 2, 4, Map.of()); - Model model2 = new Model("m_2", 50, 2, 3, Map.of()); - Model model3 = new Model("m_3", 50, 1, 2, Map.of()); - Model model4 = new Model("m_4", 50, 2, 1, Map.of()); + Model model1 = new Model("m_1", 50, 2, 4, Map.of(), 0); + Model model2 = new Model("m_2", 50, 2, 3, Map.of(), 0); + Model model3 = new Model("m_3", 50, 1, 2, Map.of(), 0); + Model model4 = new Model("m_4", 50, 2, 1, Map.of(), 0); AssignmentPlan plan = new AssignmentPlanner(List.of(node1, node2, node3, node4), List.of(model1, model2, model3, model4)) .computePlan(); @@ -168,7 +171,7 @@ public void testMultipleModelsAndNodesWithSingleSolution() { } public void testModelWithMoreAllocationsThanAvailableCores_GivenThreeThreadsPerAllocation() { - Model model = new Model("m_1", 30, 10, 3, Map.of()); + Model model = new Model("m_1", 30, 10, 3, Map.of(), 0); // Single node { Node node = new Node("n_1", 100, 4); @@ -203,7 +206,7 @@ public void testModelWithMoreAllocationsThanAvailableCores_GivenThreeThreadsPerA public void testModelWithPreviousAssignmentAndNoMoreCoresAvailable() { Node node = new Node("n_1", 100, 4); - Model model = new Model("m_1", 30, 4, 1, Map.of("n_1", 4)); + Model model = new Model("m_1", 30, 4, 1, Map.of("n_1", 4), 0); AssignmentPlan plan = new AssignmentPlanner(List.of(node), List.of(model)).computePlan(); assertThat(plan.assignments(model).isPresent(), is(true)); @@ -220,18 +223,18 @@ public void testFullCoreUtilization_GivenModelsWithSingleThreadPerAllocation() { new Node("n_6", ByteSizeValue.ofGb(8).getBytes(), 16) ); List models = List.of( - new Model("m_1", ByteSizeValue.ofGb(4).getBytes(), 10, 1, Map.of("n_1", 5)), - new Model("m_2", ByteSizeValue.ofGb(2).getBytes(), 3, 1, Map.of("n_3", 2)), - new Model("m_3", ByteSizeValue.ofGb(3).getBytes(), 3, 1, Map.of()), - new Model("m_4", ByteSizeValue.ofGb(1).getBytes(), 4, 1, Map.of("n_3", 2)), - new Model("m_5", ByteSizeValue.ofGb(6).getBytes(), 2, 1, Map.of()), - new Model("m_6", ByteSizeValue.ofGb(1).getBytes(), 12, 1, Map.of()), - new Model("m_7", ByteSizeValue.ofGb(1).getBytes() / 2, 12, 1, Map.of("n_2", 6)), - new Model("m_8", ByteSizeValue.ofGb(2).getBytes(), 4, 1, Map.of()), - new Model("m_9", ByteSizeValue.ofGb(1).getBytes(), 4, 
1, Map.of()), - new Model("m_10", ByteSizeValue.ofGb(7).getBytes(), 7, 1, Map.of()), - new Model("m_11", ByteSizeValue.ofGb(2).getBytes(), 3, 1, Map.of()), - new Model("m_12", ByteSizeValue.ofGb(1).getBytes(), 10, 1, Map.of()) + new Model("m_1", ByteSizeValue.ofGb(4).getBytes(), 10, 1, Map.of("n_1", 5), 0), + new Model("m_2", ByteSizeValue.ofGb(2).getBytes(), 3, 1, Map.of("n_3", 2), 0), + new Model("m_3", ByteSizeValue.ofGb(3).getBytes(), 3, 1, Map.of(), 0), + new Model("m_4", ByteSizeValue.ofGb(1).getBytes(), 4, 1, Map.of("n_3", 2), 0), + new Model("m_5", ByteSizeValue.ofGb(6).getBytes(), 2, 1, Map.of(), 0), + new Model("m_6", ByteSizeValue.ofGb(1).getBytes(), 12, 1, Map.of(), 0), + new Model("m_7", ByteSizeValue.ofGb(1).getBytes() / 2, 12, 1, Map.of("n_2", 6), 0), + new Model("m_8", ByteSizeValue.ofGb(2).getBytes(), 4, 1, Map.of(), 0), + new Model("m_9", ByteSizeValue.ofGb(1).getBytes(), 4, 1, Map.of(), 0), + new Model("m_10", ByteSizeValue.ofGb(7).getBytes(), 7, 1, Map.of(), 0), + new Model("m_11", ByteSizeValue.ofGb(2).getBytes(), 3, 1, Map.of(), 0), + new Model("m_12", ByteSizeValue.ofGb(1).getBytes(), 10, 1, Map.of(), 0) ); AssignmentPlan assignmentPlan = new AssignmentPlanner(nodes, models).computePlan(); @@ -330,7 +333,9 @@ public void testPreviousAssignmentsGetAtLeastAsManyAllocationsAfterAddingNewMode Map previousAssignments = assignments.entrySet() .stream() .collect(Collectors.toMap(e -> e.getKey().id(), Map.Entry::getValue)); - previousModelsPlusNew.add(new Model(m.id(), m.memoryBytes(), m.allocations(), m.threadsPerAllocation(), previousAssignments)); + previousModelsPlusNew.add( + new Model(m.id(), m.memoryBytes(), m.allocations(), m.threadsPerAllocation(), previousAssignments, 0) + ); } previousModelsPlusNew.add(randomModel("new")); @@ -343,8 +348,8 @@ public void testGivenLargerModelWithPreviousAssignmentsAndSmallerModelWithoutAss Node node1 = new Node("n_1", ByteSizeValue.ofGb(2).getBytes(), 2); Node node2 = new Node("n_2", ByteSizeValue.ofGb(2).getBytes(), 2); Node node3 = new Node("n_3", ByteSizeValue.ofGb(2).getBytes(), 2); - Model model1 = new Model("m_1", ByteSizeValue.ofMb(1200).getBytes(), 3, 1, Map.of("n_1", 2, "n_2", 1)); - Model model2 = new Model("m_2", ByteSizeValue.ofMb(1100).getBytes(), 2, 1, Map.of()); + Model model1 = new Model("m_1", ByteSizeValue.ofMb(1200).getBytes(), 3, 1, Map.of("n_1", 2, "n_2", 1), 0); + Model model2 = new Model("m_2", ByteSizeValue.ofMb(1100).getBytes(), 2, 1, Map.of(), 0); AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2, node3), List.of(model1, model2)).computePlan(); { assertThat(assignmentPlan.assignments(model1).isPresent(), is(true)); @@ -362,6 +367,131 @@ public void testGivenLargerModelWithPreviousAssignmentsAndSmallerModelWithoutAss } } + public void testModelWithoutCurrentAllocationsGetsAssignedIfAllocatedPreviously() { + Node node1 = new Node("n_1", ByteSizeValue.ofGb(4).getBytes(), 2); + Node node2 = new Node("n_2", ByteSizeValue.ofGb(4).getBytes(), 2); + Model model1 = new Model("m_1", ByteSizeValue.ofMb(1200).getBytes(), 3, 1, Map.of("n_1", 2, "n_2", 1), 3); + Model model2 = new Model("m_2", ByteSizeValue.ofMb(1100).getBytes(), 1, 2, Map.of(), 1); + + AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2), List.of(model1, model2)).computePlan(); + + Map> indexedBasedPlan = convertToIdIndexed(assignmentPlan); + assertThat(indexedBasedPlan.keySet(), hasItems("m_1", "m_2")); + assertThat(indexedBasedPlan.get("m_1"), equalTo(Map.of("n_1", 2))); + 
assertThat(indexedBasedPlan.get("m_2"), equalTo(Map.of("n_2", 1))); + } + + public void testGivenPreviouslyAssignedModels_CannotAllBeAllocated() { + Node node1 = new Node("n_1", ByteSizeValue.ofGb(2).getBytes(), 2); + Model model1 = new Model("m_1", ByteSizeValue.ofMb(1200).getBytes(), 1, 1, Map.of(), 1); + Model model2 = new Model("m_2", ByteSizeValue.ofMb(1100).getBytes(), 1, 1, Map.of(), 1); + + AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1), List.of(model1, model2)).computePlan(); + + assertThat(assignmentPlan.countPreviouslyAssignedModelsThatAreStillAssigned(), equalTo(1L)); + } + + public void testGivenClusterResize_ShouldAllocateEachModelAtLeastOnce() { + Node node1 = new Node("n_1", ByteSizeValue.ofMb(1200).getBytes(), 2); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(1200).getBytes(), 2); + Model model1 = new Model("m_1", ByteSizeValue.ofMb(800).getBytes(), 2, 1, Map.of(), 0); + Model model2 = new Model("m_2", ByteSizeValue.ofMb(800).getBytes(), 1, 1, Map.of(), 0); + Model model3 = new Model("m_3", ByteSizeValue.ofMb(250).getBytes(), 4, 1, Map.of(), 0); + + // First only start m_1 + AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2), List.of(model1)).computePlan(); + + Map> indexedBasedPlan = convertToIdIndexed(assignmentPlan); + assertThat(indexedBasedPlan.keySet(), hasItems("m_1")); + assertThat(indexedBasedPlan.get("m_1"), equalTo(Map.of("n_1", 2))); + + // Then start m_2 + assignmentPlan = new AssignmentPlanner( + List.of(node1, node2), + Stream.concat(createModelsFromPlan(assignmentPlan).stream(), Stream.of(model2)).toList() + ).computePlan(); + + indexedBasedPlan = convertToIdIndexed(assignmentPlan); + assertThat(indexedBasedPlan.keySet(), hasItems("m_1", "m_2")); + assertThat(indexedBasedPlan.get("m_1"), equalTo(Map.of("n_1", 2))); + assertThat(indexedBasedPlan.get("m_2"), equalTo(Map.of("n_2", 1))); + + // Then start m_3 + assignmentPlan = new AssignmentPlanner( + List.of(node1, node2), + Stream.concat(createModelsFromPlan(assignmentPlan).stream(), Stream.of(model3)).toList() + ).computePlan(); + + indexedBasedPlan = convertToIdIndexed(assignmentPlan); + assertThat(indexedBasedPlan.keySet(), hasItems("m_1", "m_2", "m_3")); + assertThat(indexedBasedPlan.get("m_1"), equalTo(Map.of("n_1", 2))); + assertThat(indexedBasedPlan.get("m_2"), equalTo(Map.of("n_2", 1))); + assertThat(indexedBasedPlan.get("m_3"), equalTo(Map.of("n_2", 1))); + + // Now the cluster starts getting resized. + Node node3 = new Node("n_3", ByteSizeValue.ofMb(2400).getBytes(), 2); + Node node4 = new Node("n_4", ByteSizeValue.ofMb(2400).getBytes(), 2); + + // First, one node goes away. + assignmentPlan = new AssignmentPlanner(List.of(node1), createModelsFromPlan(assignmentPlan)).computePlan(); + + // Then, a node double in memory size is added. + assignmentPlan = new AssignmentPlanner(List.of(node1, node3), createModelsFromPlan(assignmentPlan)).computePlan(); + // And another. 
+ assignmentPlan = new AssignmentPlanner(List.of(node1, node3, node4), createModelsFromPlan(assignmentPlan)).computePlan();
+ // Finally, the remaining smaller node is removed
+ assignmentPlan = new AssignmentPlanner(List.of(node3, node4), createModelsFromPlan(assignmentPlan)).computePlan();
+
+ indexedBasedPlan = convertToIdIndexed(assignmentPlan);
+ assertThat(indexedBasedPlan.keySet(), hasItems("m_1", "m_2", "m_3"));
+ assertThat(indexedBasedPlan.get("m_1").values().stream().mapToInt(Integer::intValue).sum(), greaterThanOrEqualTo(1));
+ assertThat(indexedBasedPlan.get("m_2").values().stream().mapToInt(Integer::intValue).sum(), greaterThanOrEqualTo(1));
+ assertThat(indexedBasedPlan.get("m_3").values().stream().mapToInt(Integer::intValue).sum(), greaterThanOrEqualTo(1));
+
+ // Assert that all cores are utilized
+ assertThat(assignmentPlan.getRemainingNodeCores("n_1"), equalTo(0));
+ assertThat(assignmentPlan.getRemainingNodeCores("n_2"), equalTo(0));
+ }
+
+ private static List<Model> createModelsFromPlan(AssignmentPlan plan) {
+ List<Model> models = new ArrayList<>();
+ for (Model m : plan.models()) {
+ Optional<Map<Node, Integer>> assignments = plan.assignments(m);
+ Map<String, Integer> currentAllocations = Map.of();
+ if (assignments.isPresent()) {
+ currentAllocations = new HashMap<>();
+ for (Map.Entry<Node, Integer> nodeAssignments : assignments.get().entrySet()) {
+ currentAllocations.put(nodeAssignments.getKey().id(), nodeAssignments.getValue());
+ }
+ }
+ int totalAllocations = currentAllocations.values().stream().mapToInt(Integer::intValue).sum();
+ models.add(
+ new Model(
+ m.id(),
+ m.memoryBytes(),
+ m.allocations(),
+ m.threadsPerAllocation(),
+ currentAllocations,
+ Math.max(m.maxAssignedAllocations(), totalAllocations)
+ )
+ );
+ }
+ return models;
+ }
+
+ private static Map<String, Map<String, Integer>> convertToIdIndexed(AssignmentPlan plan) {
+ Map<String, Map<String, Integer>> result = new HashMap<>();
+ for (Model m : plan.models()) {
+ Optional<Map<Node, Integer>> assignments = plan.assignments(m);
+ Map<String, Integer> allocationsPerNodeId = assignments.isPresent() ?
new HashMap<>() : Map.of(); + for (Map.Entry nodeAssignments : assignments.orElse(Map.of()).entrySet()) { + allocationsPerNodeId.put(nodeAssignments.getKey().id(), nodeAssignments.getValue()); + } + result.put(m.id(), allocationsPerNodeId); + } + return result; + } + private static void assertModelFullyAssignedToNode(AssignmentPlan plan, Model m, Node n) { Optional> assignments = plan.assignments(m); assertThat(assignments.isPresent(), is(true)); @@ -395,12 +525,14 @@ private List randomModels(int scale, double load, List nodes) { } private static Model randomModel(String idSuffix) { + int allocations = randomIntBetween(1, 32); return new Model( "m_" + idSuffix, randomLongBetween(ByteSizeValue.ofMb(100).getBytes(), ByteSizeValue.ofGb(10).getBytes()), randomIntBetween(1, 32), randomIntBetween(1, 4), - Map.of() + Map.of(), + 0 ); } @@ -417,7 +549,7 @@ private static void assertPreviousAssignmentsAreSatisfied(List models, As allocations += e.getValue(); } assertThat(m.currentAllocationsByNodeId().keySet(), everyItem(in(assignedNodeIds))); - assertThat(allocations, greaterThanOrEqualTo(m.getPreviouslyAssignedAllocations())); + assertThat(allocations, greaterThanOrEqualTo(m.getCurrentAssignedAllocations())); } } @@ -428,7 +560,7 @@ private void runTooManyNodesAndModels(int nodesSize, int modelsSize) { } List models = new ArrayList<>(); for (int i = 0; i < modelsSize; i++) { - models.add(new Model("m_" + i, ByteSizeValue.ofMb(200).getBytes(), 2, 1, Map.of())); + models.add(new Model("m_" + i, ByteSizeValue.ofMb(200).getBytes(), 2, 1, Map.of(), 0)); } // Check plan is computed without OOM exception diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocationsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocationsTests.java index 7add808f37978..8a798b4e469ae 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocationsTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocationsTests.java @@ -24,8 +24,8 @@ public class PreserveAllAllocationsTests extends ESTestCase { public void testGivenNoPreviousAssignments() { Node node1 = new Node("n_1", 100, 4); Node node2 = new Node("n_2", 100, 4); - Model model1 = new Model("m_1", 30, 2, 1, Map.of()); - Model model2 = new Model("m_2", 30, 2, 4, Map.of()); + Model model1 = new Model("m_1", 30, 2, 1, Map.of(), 0); + Model model2 = new Model("m_2", 30, 2, 4, Map.of(), 0); PreserveAllAllocations preserveAllAllocations = new PreserveAllAllocations(List.of(node1, node2), List.of(model1, model2)); List nodesPreservingAllocations = preserveAllAllocations.nodesPreservingAllocations(); @@ -38,8 +38,8 @@ public void testGivenNoPreviousAssignments() { public void testGivenPreviousAssignments() { Node node1 = new Node("n_1", 100, 8); Node node2 = new Node("n_2", 100, 8); - Model model1 = new Model("m_1", 30, 2, 1, Map.of("n_1", 1)); - Model model2 = new Model("m_2", 50, 6, 4, Map.of("n_1", 1, "n_2", 2)); + Model model1 = new Model("m_1", 30, 2, 1, Map.of("n_1", 1), 1); + Model model2 = new Model("m_2", 50, 6, 4, Map.of("n_1", 1, "n_2", 2), 3); PreserveAllAllocations preserveAllAllocations = new PreserveAllAllocations(List.of(node1, node2), List.of(model1, model2)); List nodesPreservingAllocations = preserveAllAllocations.nodesPreservingAllocations(); @@ -86,7 +86,7 @@ public void testGivenPreviousAssignments() { 
public void testGivenModelWithPreviousAssignments_AndPlanToMergeHasNoAssignments() { Node node = new Node("n_1", 100, 4); - Model model = new Model("m_1", 30, 2, 2, Map.of("n_1", 2)); + Model model = new Model("m_1", 30, 2, 2, Map.of("n_1", 2), 2); PreserveAllAllocations preserveAllAllocations = new PreserveAllAllocations(List.of(node), List.of(model)); AssignmentPlan plan = AssignmentPlan.builder(List.of(node), List.of(model)).build(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocationTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocationTests.java index 7c8ea92cd8d49..655f8a6ecf05a 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocationTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocationTests.java @@ -24,8 +24,8 @@ public class PreserveOneAllocationTests extends ESTestCase { public void testGivenNoPreviousAssignments() { Node node1 = new Node("n_1", 100, 4); Node node2 = new Node("n_2", 100, 4); - Model model1 = new Model("m_1", 30, 2, 1, Map.of()); - Model model2 = new Model("m_2", 30, 2, 4, Map.of()); + Model model1 = new Model("m_1", 30, 2, 1, Map.of(), 0); + Model model2 = new Model("m_2", 30, 2, 4, Map.of(), 0); PreserveOneAllocation preserveOneAllocation = new PreserveOneAllocation(List.of(node1, node2), List.of(model1, model2)); List nodesPreservingAllocations = preserveOneAllocation.nodesPreservingAllocations(); @@ -38,8 +38,8 @@ public void testGivenNoPreviousAssignments() { public void testGivenPreviousAssignments() { Node node1 = new Node("n_1", 100, 8); Node node2 = new Node("n_2", 100, 8); - Model model1 = new Model("m_1", 30, 2, 1, Map.of("n_1", 1)); - Model model2 = new Model("m_2", 50, 6, 4, Map.of("n_1", 1, "n_2", 2)); + Model model1 = new Model("m_1", 30, 2, 1, Map.of("n_1", 1), 1); + Model model2 = new Model("m_2", 50, 6, 4, Map.of("n_1", 1, "n_2", 2), 3); PreserveOneAllocation preserveOneAllocation = new PreserveOneAllocation(List.of(node1, node2), List.of(model1, model2)); List nodesPreservingAllocations = preserveOneAllocation.nodesPreservingAllocations(); @@ -87,7 +87,7 @@ public void testGivenPreviousAssignments() { public void testGivenModelWithPreviousAssignments_AndPlanToMergeHasNoAssignments() { Node node = new Node("n_1", 100, 4); - Model model = new Model("m_1", 30, 2, 2, Map.of("n_1", 2)); + Model model = new Model("m_1", 30, 2, 2, Map.of("n_1", 2), 2); PreserveOneAllocation preserveOneAllocation = new PreserveOneAllocation(List.of(node), List.of(model)); AssignmentPlan plan = AssignmentPlan.builder(List.of(node), List.of(model)).build(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/BasicTokenFilterTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/BasicTokenFilterTests.java index 9199e2c776f2e..a3288baf65968 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/BasicTokenFilterTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/BasicTokenFilterTests.java @@ -67,6 +67,7 @@ public void testSplitCJK() throws Exception { public void testStripAccents() throws Exception { Analyzer analyzer = basicAnalyzerFromSettings(true, true, List.of("[UNK]")); assertAnalyzesToNoCharFilter(analyzer, "HäLLo how are 
you", new String[] { "HaLLo", "how", "are", "you" }); + assertAnalyzesToNoCharFilter(analyzer, "ÎÎÎÏνÎÎÎαοÏ", new String[] { "IIIII½IIII±I", "¿", "I" }); } private static void assertAnalyzesToNoCharFilter(Analyzer a, String input, String[] output) throws IOException { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/AbstractDataToProcessWriterTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/AbstractDataToProcessWriterTests.java index 68d75e7ec2b7c..c3562cc143dd0 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/AbstractDataToProcessWriterTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/AbstractDataToProcessWriterTests.java @@ -165,4 +165,81 @@ public void testTokenizeForCategorization() throws IOException { ); } } + + public void testMaybeTruncateCategorizationField() { + { + DataDescription.Builder dd = new DataDescription.Builder(); + dd.setTimeField("time_field"); + + Detector.Builder detector = new Detector.Builder("count", ""); + detector.setByFieldName("mlcategory"); + AnalysisConfig.Builder builder = new AnalysisConfig.Builder(Collections.singletonList(detector.build())); + builder.setCategorizationFieldName("message"); + AnalysisConfig ac = builder.build(); + + boolean includeTokensFields = randomBoolean(); + AbstractDataToProcessWriter writer = new JsonDataToProcessWriter( + true, + includeTokensFields, + autodetectProcess, + dd.build(), + ac, + dataCountsReporter, + NamedXContentRegistry.EMPTY + ); + + String truncatedField = writer.maybeTruncateCatgeorizationField(randomAlphaOfLengthBetween(1002, 2000)); + assertEquals(AnalysisConfig.MAX_CATEGORIZATION_FIELD_LENGTH, truncatedField.length()); + } + { + DataDescription.Builder dd = new DataDescription.Builder(); + dd.setTimeField("time_field"); + + Detector.Builder detector = new Detector.Builder("count", ""); + detector.setByFieldName("mlcategory"); + AnalysisConfig.Builder builder = new AnalysisConfig.Builder(Collections.singletonList(detector.build())); + builder.setCategorizationFieldName("message"); + AnalysisConfig ac = builder.build(); + + boolean includeTokensFields = randomBoolean(); + AbstractDataToProcessWriter writer = new JsonDataToProcessWriter( + true, + includeTokensFields, + autodetectProcess, + dd.build(), + ac, + dataCountsReporter, + NamedXContentRegistry.EMPTY + ); + + String categorizationField = randomAlphaOfLengthBetween(1, 1000); + String truncatedField = writer.maybeTruncateCatgeorizationField(categorizationField); + assertEquals(categorizationField.length(), truncatedField.length()); + } + { + DataDescription.Builder dd = new DataDescription.Builder(); + dd.setTimeField("time_field"); + + Detector.Builder detector = new Detector.Builder("count", ""); + detector.setByFieldName("mlcategory"); + detector.setPartitionFieldName("message"); + AnalysisConfig.Builder builder = new AnalysisConfig.Builder(Collections.singletonList(detector.build())); + builder.setCategorizationFieldName("message"); + AnalysisConfig ac = builder.build(); + + boolean includeTokensFields = randomBoolean(); + AbstractDataToProcessWriter writer = new JsonDataToProcessWriter( + true, + includeTokensFields, + autodetectProcess, + dd.build(), + ac, + dataCountsReporter, + NamedXContentRegistry.EMPTY + ); + + String truncatedField = writer.maybeTruncateCatgeorizationField(randomAlphaOfLengthBetween(1002, 2000)); + 
assertFalse(AnalysisConfig.MAX_CATEGORIZATION_FIELD_LENGTH == truncatedField.length()); + } + } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/JsonDataToProcessWriterTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/JsonDataToProcessWriterTests.java index 21aa644b00ee8..82a4ddea6460c 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/JsonDataToProcessWriterTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/JsonDataToProcessWriterTests.java @@ -23,7 +23,6 @@ import org.elasticsearch.xpack.core.ml.job.config.CategorizationAnalyzerConfig; import org.elasticsearch.xpack.core.ml.job.config.DataDescription; import org.elasticsearch.xpack.core.ml.job.config.Detector; -import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.job.categorization.CategorizationAnalyzer; import org.elasticsearch.xpack.ml.job.categorization.CategorizationAnalyzerTests; import org.elasticsearch.xpack.ml.job.process.DataCountsReporter; @@ -135,15 +134,10 @@ public void testWrite_GivenTimeFormatIsEpochAndCategorization() throws Exception List expectedRecords = new ArrayList<>(); // The "." field is the control field; "..." is the pre-tokenized tokens field - if (MachineLearning.CATEGORIZATION_TOKENIZATION_IN_JAVA) { - expectedRecords.add(new String[] { "time", "message", "...", "." }); - expectedRecords.add(new String[] { "1", "Node 1 started", "Node,started", "" }); - expectedRecords.add(new String[] { "2", "Node 2 started", "Node,started", "" }); - } else { - expectedRecords.add(new String[] { "time", "message", "." }); - expectedRecords.add(new String[] { "1", "Node 1 started", "" }); - expectedRecords.add(new String[] { "2", "Node 2 started", "" }); - } + expectedRecords.add(new String[] { "time", "message", "...", "." 
}); + expectedRecords.add(new String[] { "1", "Node 1 started", "Node,started", "" }); + expectedRecords.add(new String[] { "2", "Node 2 started", "Node,started", "" }); + assertWrittenRecordsEqualTo(expectedRecords); verify(dataCountsReporter).finishReporting(); @@ -411,8 +405,7 @@ private static InputStream createInputStream(String input) { } private JsonDataToProcessWriter createWriter() { - boolean includeTokensField = MachineLearning.CATEGORIZATION_TOKENIZATION_IN_JAVA - && analysisConfig.getCategorizationFieldName() != null; + boolean includeTokensField = analysisConfig.getCategorizationFieldName() != null; return new JsonDataToProcessWriter( true, includeTokensField, diff --git a/x-pack/plugin/rollup/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/rollup/10_basic.yml b/x-pack/plugin/rollup/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/rollup/10_basic.yml index fee9a0ed0ed08..6c062ce4f4507 100644 --- a/x-pack/plugin/rollup/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/rollup/10_basic.yml +++ b/x-pack/plugin/rollup/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/rollup/10_basic.yml @@ -44,6 +44,12 @@ setup: type: keyword values: type: integer + multi-counter: + type: long + time_series_metric: counter + multi-gauge: + type: integer + time_series_metric: gauge network: properties: tx: @@ -58,21 +64,21 @@ setup: index: test body: - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}, "created_at": "2021-04-28T19:34:00.000Z", "running": false, "number_of_containers": 2, "tags": ["backend", "prod"], "values": [2, 3, 6]}}}' + - '{"@timestamp": "2021-04-28T18:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "multi-counter" : [10, 11, 12], "multi-gauge": [100, 200, 150], "network": {"tx": 2001818691, "rx": 802133794}, "created_at": "2021-04-28T19:34:00.000Z", "running": false, "number_of_containers": 2, "tags": ["backend", "prod"], "values": [2, 3, 6]}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:50:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.26", "network": {"tx": 2005177954, "rx": 801479970}, "created_at": "2021-04-28T19:35:00.000Z", "running": true, "number_of_containers": 2, "tags": ["backend", "prod", "us-west1"], "values": [1, 1, 3]}}}' + - '{"@timestamp": "2021-04-28T18:50:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.26", "multi-counter" : [21, 22, 23], "multi-gauge": [90, 91, 95], "network": {"tx": 2005177954, "rx": 801479970}, "created_at": "2021-04-28T19:35:00.000Z", "running": true, "number_of_containers": 2, "tags": ["backend", "prod", "us-west1"], "values": [1, 1, 3]}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T20:50:44.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.41", "network": {"tx": 2006223737, "rx": 802337279}, "created_at": "2021-04-28T19:36:00.000Z", "running": true, "number_of_containers": 2, "tags": ["backend", "prod", "us-west2"], "values": [4, 1, 2]}}}' + - '{"@timestamp": "2021-04-28T20:50:44.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.41", "multi-counter" : [1, 5, 
10], "multi-gauge": [103, 110, 109], "network": {"tx": 2006223737, "rx": 802337279}, "created_at": "2021-04-28T19:36:00.000Z", "running": true, "number_of_containers": 2, "tags": ["backend", "prod", "us-west2"], "values": [4, 1, 2]}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T20:51:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.22", "network": {"tx": 2012916202, "rx": 803685721}, "created_at": "2021-04-28T19:37:00.000Z", "running": true, "number_of_containers": 2, "tags": ["backend", "prod"], "values": [2, 3, 1]}}}' + - '{"@timestamp": "2021-04-28T20:51:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.22", "multi-counter" : [101, 102, 105], "multi-gauge": [100, 100, 100], "network": {"tx": 2012916202, "rx": 803685721}, "created_at": "2021-04-28T19:37:00.000Z", "running": true, "number_of_containers": 2, "tags": ["backend", "prod"], "values": [2, 3, 1]}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:50:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.33", "network": {"tx": 1434521831, "rx": 530575198}, "created_at": "2021-04-28T19:42:00.000Z", "running": false, "number_of_containers": 1, "tags": ["backend", "test"], "values": [2, 3, 4]}}}' + - '{"@timestamp": "2021-04-28T18:50:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.33", "multi-counter" : [7, 11, 44], "multi-gauge": [100, 100, 102], "network": {"tx": 1434521831, "rx": 530575198}, "created_at": "2021-04-28T19:42:00.000Z", "running": false, "number_of_containers": 1, "tags": ["backend", "test"], "values": [2, 3, 4]}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:50:23.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.56", "network": {"tx": 1434577921, "rx": 530600088}, "created_at": "2021-04-28T19:43:00.000Z", "running": false, "number_of_containers": 1, "tags": ["backend", "test", "us-west2"], "values": [2, 1, 1]}}}' + - '{"@timestamp": "2021-04-28T18:50:23.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.56", "multi-counter" : [0, 0, 1], "multi-gauge": [101, 102, 102], "network": {"tx": 1434577921, "rx": 530600088}, "created_at": "2021-04-28T19:43:00.000Z", "running": false, "number_of_containers": 1, "tags": ["backend", "test", "us-west2"], "values": [2, 1, 1]}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T19:50:53.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.37", "network": {"tx": 1434587694, "rx": 530604797}, "created_at": "2021-04-28T19:44:00.000Z", "running": true, "number_of_containers": 1, "tags": ["backend", "test", "us-west1"], "values": [4, 5, 2]}}}' + - '{"@timestamp": "2021-04-28T19:50:53.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.37", "multi-counter" : [1000, 1001, 1002], "multi-gauge": [99, 100, 110], "network": {"tx": 1434587694, "rx": 530604797}, "created_at": "2021-04-28T19:44:00.000Z", "running": true, "number_of_containers": 1, "tags": ["backend", "test", "us-west1"], "values": [4, 5, 2]}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T19:51:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": 
"dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.120", "network": {"tx": 1434595272, "rx": 530605511}, "created_at": "2021-04-28T19:45:00.000Z", "running": true, "number_of_containers": 1, "tags": ["backend", "test", "us-west1"], "values": [3, 2, 1]}}}' + - '{"@timestamp": "2021-04-28T19:51:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.120", "multi-counter" : [76, 77, 78], "multi-gauge": [95, 98, 100], "network": {"tx": 1434595272, "rx": 530605511}, "created_at": "2021-04-28T19:45:00.000Z", "running": true, "number_of_containers": 1, "tags": ["backend", "test", "us-west1"], "values": [3, 2, 1]}}}' - do: indices.put_settings: @@ -106,6 +112,11 @@ setup: - match: { hits.hits.0._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.0._source.metricset: pod } - match: { hits.hits.0._source.@timestamp: 2021-04-28T18:00:00.000Z } + - match: { hits.hits.0._source.k8s\.pod\.multi-counter: 21 } + - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.min: 90 } + - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.max: 200 } + - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.sum: 726 } + - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.value_count: 6 } - match: { hits.hits.0._source.k8s\.pod\.network\.tx.min: 2001818691 } - match: { hits.hits.0._source.k8s\.pod\.network\.tx.max: 2005177954 } - match: { hits.hits.0._source.k8s\.pod\.network\.tx.value_count: 2 } diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportGetRollupCapsAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportGetRollupCapsAction.java index cbb5e0394558e..90a626517c044 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportGetRollupCapsAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportGetRollupCapsAction.java @@ -15,6 +15,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.rollup.action.GetRollupCapsAction; import org.elasticsearch.xpack.core.rollup.action.RollableIndexCaps; @@ -33,7 +34,7 @@ public class TransportGetRollupCapsAction extends HandledTransportAction fieldValues) { - final Object[] value = fieldValues.apply(docValueCount); + final Object[] values = fieldValues.apply(docValueCount); if (metricFieldProducers.containsKey(field)) { // TODO: missing support for array metrics - collectMetric(field, value[0]); + collectMetric(field, values); } else if (labelFieldProducers.containsKey(field)) { - if (value.length == 1) { - collectLabel(field, value[0]); + if (values.length == 1) { + collectLabel(field, values[0]); } else { - collectLabel(field, value); + collectLabel(field, values); } } else { throw new IllegalArgumentException( @@ -423,13 +423,15 @@ private void collectLabel(final String field, final Object value) { labelFieldProducers.get(field).collect(value); } - private void collectMetric(final String field, final Object value) { - if (value instanceof Number number) { - metricFieldProducers.get(field).collect(number); - } else { - throw new IllegalArgumentException( - "Expected numeric value for field '" + field + "' but got non numeric value: '" + value + "'" - ); + private void 
collectMetric(final String field, final Object[] values) { + for (var value : values) { + if (value instanceof Number number) { + metricFieldProducers.get(field).collect(number); + } else { + throw new IllegalArgumentException( + "Expected numeric value for field '" + field + "' but got non numeric value: '" + value + "'" + ); + } } } diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/v2/RollupActionSingleNodeTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/v2/RollupActionSingleNodeTests.java index 12ab12e3da3d4..7f1dde9948788 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/v2/RollupActionSingleNodeTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/v2/RollupActionSingleNodeTests.java @@ -94,9 +94,11 @@ import java.util.Map; import java.util.Optional; import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import static org.elasticsearch.index.mapper.TimeSeriesParams.TIME_SERIES_METRIC_PARAM; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.containsString; @@ -163,57 +165,59 @@ public void setup() { * check that the value of the label (last value) matches the value * of the corresponding metric which uses a last_value metric type. */ - client().admin() - .indices() - .prepareCreate(sourceIndex) - .setSettings( - Settings.builder() - .put("index.number_of_shards", numOfShards) - .put("index.number_of_replicas", numOfReplicas) - .put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) - .putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), List.of(FIELD_DIMENSION_1)) - .put( - IndexSettings.TIME_SERIES_START_TIME.getKey(), - DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(Instant.ofEpochMilli(startTime).toEpochMilli()) - ) - .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), "2106-01-08T23:40:53.384Z") - .build() - ) - .setMapping( - FIELD_TIMESTAMP, - "type=date", - FIELD_DIMENSION_1, - "type=keyword,time_series_dimension=true", - FIELD_DIMENSION_2, - "type=long,time_series_dimension=true", - FIELD_NUMERIC_1, - "type=long,time_series_metric=gauge", - FIELD_NUMERIC_2, - "type=double,time_series_metric=counter", - FIELD_LABEL_DOUBLE, - "type=double", - FIELD_LABEL_INTEGER, - "type=integer", - FIELD_LABEL_KEYWORD, - "type=keyword", - FIELD_LABEL_TEXT, - "type=text", - FIELD_LABEL_BOOLEAN, - "type=boolean", - FIELD_METRIC_LABEL_DOUBLE, /* numeric label indexed as a metric */ - "type=double,time_series_metric=counter", - FIELD_LABEL_IPv4_ADDRESS, - "type=ip", - FIELD_LABEL_IPv6_ADDRESS, - "type=ip", - FIELD_LABEL_DATE, - "type=date,format=date_optional_time", - FIELD_LABEL_KEYWORD_ARRAY, - "type=keyword", - FIELD_LABEL_DOUBLE_ARRAY, - "type=double" - ) - .get(); + assertAcked( + client().admin() + .indices() + .prepareCreate(sourceIndex) + .setSettings( + Settings.builder() + .put("index.number_of_shards", numOfShards) + .put("index.number_of_replicas", numOfReplicas) + .put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) + .putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), List.of(FIELD_DIMENSION_1)) + .put( + IndexSettings.TIME_SERIES_START_TIME.getKey(), + DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(Instant.ofEpochMilli(startTime).toEpochMilli()) + ) + .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), "2106-01-08T23:40:53.384Z") + 
.build() + ) + .setMapping( + FIELD_TIMESTAMP, + "type=date", + FIELD_DIMENSION_1, + "type=keyword,time_series_dimension=true", + FIELD_DIMENSION_2, + "type=long,time_series_dimension=true", + FIELD_NUMERIC_1, + "type=long,time_series_metric=gauge", + FIELD_NUMERIC_2, + "type=double,time_series_metric=counter", + FIELD_LABEL_DOUBLE, + "type=double", + FIELD_LABEL_INTEGER, + "type=integer", + FIELD_LABEL_KEYWORD, + "type=keyword", + FIELD_LABEL_TEXT, + "type=text", + FIELD_LABEL_BOOLEAN, + "type=boolean", + FIELD_METRIC_LABEL_DOUBLE, /* numeric label indexed as a metric */ + "type=double,time_series_metric=counter", + FIELD_LABEL_IPv4_ADDRESS, + "type=ip", + FIELD_LABEL_IPv6_ADDRESS, + "type=ip", + FIELD_LABEL_DATE, + "type=date,format=date_optional_time", + FIELD_LABEL_KEYWORD_ARRAY, + "type=keyword", + FIELD_LABEL_DOUBLE_ARRAY, + "type=double" + ) + .get() + ); } public void testRollupIndex() throws IOException { @@ -285,8 +289,7 @@ public void testCopyIndexSettings() throws IOException { logger.info("Updating index [{}] with settings [{}]", sourceIndex, settings); var updateSettingsReq = new UpdateSettingsRequest(settings, sourceIndex); - var r = client().admin().indices().updateSettings(updateSettingsReq).actionGet(); - assertTrue("Update settings not acked", r.isAcknowledged()); + assertAcked(client().admin().indices().updateSettings(updateSettingsReq).actionGet()); RollupActionConfig config = new RollupActionConfig(randomInterval()); SourceSupplier sourceSupplier = () -> { @@ -361,7 +364,13 @@ public void testCannotRollupToExistingIndex() throws Exception { prepareSourceIndex(sourceIndex); // Create an empty index with the same name as the rollup index - client().admin().indices().prepareCreate(rollupIndex).get(); + assertAcked( + client().admin() + .indices() + .prepareCreate(rollupIndex) + .setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0).build()) + .get() + ); ResourceAlreadyExistsException exception = expectThrows( ResourceAlreadyExistsException.class, () -> rollup(sourceIndex, rollupIndex, config) @@ -433,14 +442,34 @@ public void testCannotRollupWhileOtherRollupInProgress() throws Exception { .endObject(); bulkIndex(sourceSupplier); prepareSourceIndex(sourceIndex); - client().execute(RollupAction.INSTANCE, new RollupAction.Request(sourceIndex, rollupIndex, config), ActionListener.noop()); + var rollupListener = new ActionListener() { + boolean success; + + @Override + public void onResponse(AcknowledgedResponse acknowledgedResponse) { + if (acknowledgedResponse.isAcknowledged()) { + success = true; + } else { + fail("Failed to receive rollup acknowledgement"); + } + } + + @Override + public void onFailure(Exception e) { + fail("Rollup failed: " + e.getMessage()); + } + }; + client().execute(RollupAction.INSTANCE, new RollupAction.Request(sourceIndex, rollupIndex, config), rollupListener); ResourceAlreadyExistsException exception = expectThrows( ResourceAlreadyExistsException.class, () -> rollup(sourceIndex, rollupIndex, config) ); assertThat(exception.getMessage(), containsString(rollupIndex)); + // We must wait until the in-progress rollup ends, otherwise data will not be cleaned up + assertBusy(() -> assertTrue("In progress rollup did not complete", rollupListener.success), 60, TimeUnit.SECONDS); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/88800") public void testRollupDatastream() throws Exception { RollupActionConfig config = new RollupActionConfig(randomInterval()); String dataStreamName = 
createDataStream(); @@ -520,23 +549,22 @@ private void bulkIndex(String indexName, SourceSupplier sourceSupplier) throws I private void prepareSourceIndex(String sourceIndex) { // Set the source index to read-only state - AcknowledgedResponse r = client().admin() - .indices() - .prepareUpdateSettings(sourceIndex) - .setSettings(Settings.builder().put(IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.getKey(), true).build()) - .get(); - assertTrue(r.isAcknowledged()); + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(sourceIndex) + .setSettings(Settings.builder().put(IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.getKey(), true).build()) + .get() + ); } private void rollup(String sourceIndex, String rollupIndex, RollupActionConfig config) { - AcknowledgedResponse response = client().execute(RollupAction.INSTANCE, new RollupAction.Request(sourceIndex, rollupIndex, config)) - .actionGet(); - assertTrue(response.isAcknowledged()); + assertAcked(client().execute(RollupAction.INSTANCE, new RollupAction.Request(sourceIndex, rollupIndex, config)).actionGet()); } private RolloverResponse rollover(String dataStreamName) throws ExecutionException, InterruptedException { RolloverResponse response = client().admin().indices().rolloverIndex(new RolloverRequest(dataStreamName, null)).get(); - assertTrue(response.isAcknowledged()); + assertAcked(response); return response; } @@ -886,12 +914,8 @@ private String createDataStream() throws Exception { ); PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request(dataStreamName + "_template") .indexTemplate(template); - AcknowledgedResponse response = client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); - - assertTrue(response.isAcknowledged()); - assertTrue( - client().execute(CreateDataStreamAction.INSTANCE, new CreateDataStreamAction.Request(dataStreamName)).get().isAcknowledged() - ); + assertAcked(client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet()); + assertAcked(client().execute(CreateDataStreamAction.INSTANCE, new CreateDataStreamAction.Request(dataStreamName)).get()); return dataStreamName; } } diff --git a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java index b39b1a143a980..c0893e03a34e5 100644 --- a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java +++ b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java @@ -245,9 +245,6 @@ public class Constants { "cluster:internal/xpack/ml/trained_models/cache/info", "cluster:internal/xpack/ml/trained_models/deployments/stats/get", "cluster:internal/xpack/transform/reset_mode", - "cluster:internal/master_history/get", - "cluster:internal/coordination_diagnostics/info", - "cluster:internal/formation/info", "cluster:monitor/allocation/explain", "cluster:monitor/async_search/status", "cluster:monitor/ccr/follow_info", @@ -481,6 +478,9 @@ public class Constants { "internal:admin/xpack/searchable_snapshots/frozen_cache_info", "internal:admin/xpack/searchable_snapshots/frozen_cache_info[n]", "internal:cluster/nodes/indices/shard/store", + "internal:cluster/master_history/get", + "internal:cluster/coordination_diagnostics/info", + 
"internal:cluster/formation/info", "internal:gateway/local/started_shards" ); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectPrepareAuthenticationAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectPrepareAuthenticationAction.java index 57c80cf28809e..f92421dc662a1 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectPrepareAuthenticationAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectPrepareAuthenticationAction.java @@ -13,7 +13,6 @@ import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.security.action.oidc.OpenIdConnectPrepareAuthenticationAction; @@ -24,7 +23,6 @@ import org.elasticsearch.xpack.security.authc.oidc.OpenIdConnectRealm; import java.util.List; -import java.util.stream.Collectors; public class TransportOpenIdConnectPrepareAuthenticationAction extends HandledTransportAction< OpenIdConnectPrepareAuthenticationRequest, @@ -42,7 +40,7 @@ public TransportOpenIdConnectPrepareAuthenticationAction( OpenIdConnectPrepareAuthenticationAction.NAME, transportService, actionFilters, - (Writeable.Reader) OpenIdConnectPrepareAuthenticationRequest::new + OpenIdConnectPrepareAuthenticationRequest::new ); this.realms = realms; } @@ -58,15 +56,17 @@ protected void doExecute( List matchingRealms = this.realms.stream() .filter(r -> r instanceof OpenIdConnectRealm && ((OpenIdConnectRealm) r).isIssuerValid(request.getIssuer())) .map(r -> (OpenIdConnectRealm) r) - .collect(Collectors.toList()); + .toList(); if (matchingRealms.isEmpty()) { listener.onFailure( new ElasticsearchSecurityException("Cannot find OpenID Connect realm with issuer [{}]", request.getIssuer()) ); + return; } else if (matchingRealms.size() > 1) { listener.onFailure( new ElasticsearchSecurityException("Found multiple OpenID Connect realm with issuer [{}]", request.getIssuer()) ); + return; } else { realm = matchingRealms.get(0); } diff --git a/x-pack/plugin/shutdown/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownIT.java b/x-pack/plugin/shutdown/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownIT.java index ceda1092f00e6..3d9a38e1ca5da 100644 --- a/x-pack/plugin/shutdown/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownIT.java +++ b/x-pack/plugin/shutdown/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownIT.java @@ -367,9 +367,10 @@ public void testStalledShardMigrationProperlyDetected() throws Exception { ObjectPath.eval("nodes.0.shard_migration.explanation", status), allOf( containsString(indexName), - containsString("cannot move, use the Cluster Allocation Explain API on this shard for details") + containsString("cannot move, see [node_allocation_decision] for details or use the cluster allocation explain API") ) ); + assertThat(ObjectPath.eval("nodes.0.shard_migration.node_allocation_decision", status), notNullValue()); } // Now update the allocation requirements to unblock shard relocation diff --git 
a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java index a87b72eb2ffa1..ceb37f7fdcd46 100644 --- a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java +++ b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java @@ -49,6 +49,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; +import static org.elasticsearch.cluster.metadata.ShutdownShardMigrationStatus.NODE_ALLOCATION_DECISION_KEY; import static org.elasticsearch.core.Strings.format; public class TransportGetShutdownStatusAction extends TransportMasterNodeAction< @@ -291,10 +292,11 @@ static ShutdownShardMigrationStatus shardMigrationStatus( SingleNodeShutdownMetadata.Status.STALLED, totalRemainingShards, format( - "shard [%s] [%s] of index [%s] cannot move, use the Cluster Allocation Explain API on this shard for details", + "shard [%s] [%s] of index [%s] cannot move, see [%s] for details or use the cluster allocation explain API", shardRouting.shardId().getId(), shardRouting.primary() ? "primary" : "replica", - shardRouting.index().getName() + shardRouting.index().getName(), + NODE_ALLOCATION_DECISION_KEY ), decision ); diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/InternalGeoLine.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/InternalGeoLine.java index 67205a3d1d26e..89cbeef0a55f1 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/InternalGeoLine.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/InternalGeoLine.java @@ -227,8 +227,13 @@ public Map geoJSONGeometry() { ); } final Map geoJSON = new HashMap<>(); - geoJSON.put("type", "LineString"); - geoJSON.put("coordinates", coordinates.toArray()); + if (coordinates.size() == 1) { + geoJSON.put("type", "Point"); + geoJSON.put("coordinates", coordinates.get(0)); + } else { + geoJSON.put("type", "LineString"); + geoJSON.put("coordinates", coordinates.toArray()); + } return geoJSON; } } diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregatorTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregatorTests.java index ad8d47781a4aa..fb19a481a69d2 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregatorTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregatorTests.java @@ -263,6 +263,40 @@ public void testEmpty() throws IOException { testCase(new MatchAllDocsQuery(), aggregationBuilder, iw -> {}, terms -> { assertTrue(terms.getBuckets().isEmpty()); }); } + public void testOnePoint() throws IOException { + int size = randomIntBetween(1, GeoLineAggregationBuilder.MAX_PATH_SIZE); + MultiValuesSourceFieldConfig valueConfig = new MultiValuesSourceFieldConfig.Builder().setFieldName("value_field").build(); + MultiValuesSourceFieldConfig sortConfig = new MultiValuesSourceFieldConfig.Builder().setFieldName("sort_field").build(); + GeoLineAggregationBuilder lineAggregationBuilder = new GeoLineAggregationBuilder("_name").point(valueConfig) + 
.sortOrder(SortOrder.ASC) + .sort(sortConfig) + .size(size); + TermsAggregationBuilder aggregationBuilder = new TermsAggregationBuilder("_name").field("group_id") + .subAggregation(lineAggregationBuilder); + double lon = GeoEncodingUtils.decodeLongitude(randomInt()); + double lat = GeoEncodingUtils.decodeLatitude(randomInt()); + testCase(new MatchAllDocsQuery(), aggregationBuilder, iw -> { + iw.addDocument( + Arrays.asList( + new LatLonDocValuesField("value_field", lat, lon), + new SortedNumericDocValuesField("sort_field", NumericUtils.doubleToSortableLong(randomDouble())), + new SortedDocValuesField("group_id", new BytesRef("groupOrd")) + ) + ); + }, terms -> { + assertEquals(1, terms.getBuckets().size()); + InternalGeoLine geoLine = terms.getBuckets().get(0).getAggregations().get("_name"); + assertNotNull(geoLine); + Map geojson = geoLine.geoJSONGeometry(); + assertEquals("Point", geojson.get("type")); + assertTrue(geojson.get("coordinates") instanceof double[]); + double[] coordinates = (double[]) geojson.get("coordinates"); + assertEquals(2, coordinates.length); + assertEquals(lon, coordinates[0], 1e-6); + assertEquals(lat, coordinates[1], 1e-6); + }); + } + private void testAggregator(SortOrder sortOrder) throws IOException { int size = randomIntBetween(1, GeoLineAggregationBuilder.MAX_PATH_SIZE); MultiValuesSourceFieldConfig valueConfig = new MultiValuesSourceFieldConfig.Builder().setFieldName("value_field").build(); diff --git a/x-pack/plugin/sql/qa/jdbc/build.gradle b/x-pack/plugin/sql/qa/jdbc/build.gradle index b377b70aead6e..ad6e71dce5940 100644 --- a/x-pack/plugin/sql/qa/jdbc/build.gradle +++ b/x-pack/plugin/sql/qa/jdbc/build.gradle @@ -2,7 +2,7 @@ import org.elasticsearch.gradle.internal.BwcVersions.UnreleasedVersionInfo import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.internal.info.BuildParams -import org.elasticsearch.gradle.internal.test.RestIntegTestTask +import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask description = 'Integration tests for SQL JDBC driver' apply plugin: 'elasticsearch.java' @@ -74,6 +74,7 @@ subprojects { // Compatibility testing for JDBC driver started with version 7.9.0 BuildParams.bwcVersions.allIndexCompatible.findAll({ it.onOrAfter(Version.fromString("7.9.0")) && it != VersionProperties.elasticsearchVersion }).each { bwcVersion -> def baseName = "v${bwcVersion}" + def cluster = testClusters.maybeCreate(baseName) UnreleasedVersionInfo unreleasedVersion = BuildParams.bwcVersions.unreleasedInfo(bwcVersion) Configuration driverConfiguration = configurations.create("jdbcDriver${baseName}") { @@ -92,14 +93,16 @@ subprojects { dependencies { "jdbcDriver${baseName}"(driverDependency) - } final String bwcVersionString = bwcVersion.toString() - tasks.register(bwcTaskName(bwcVersion), RestIntegTestTask) { + tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) { + useCluster cluster classpath = sourceSets.javaRestTest.runtimeClasspath + driverConfiguration testClassesDirs = sourceSets.javaRestTest.output.classesDirs systemProperty 'jdbc.driver.version', bwcVersionString + nonInputProperties.systemProperty('tests.rest.cluster', "${-> cluster.allHttpSocketURI.join(",")}") + nonInputProperties.systemProperty('tests.clustername', baseName) } } } diff --git a/x-pack/plugin/sql/qa/mixed-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/mixed_node/SqlSearchIT.java 
b/x-pack/plugin/sql/qa/mixed-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/mixed_node/SqlSearchIT.java index f09875a660b98..172f5c9d27e20 100644 --- a/x-pack/plugin/sql/qa/mixed-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/mixed_node/SqlSearchIT.java +++ b/x-pack/plugin/sql/qa/mixed-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/mixed_node/SqlSearchIT.java @@ -188,7 +188,11 @@ private Map prepareTestData( builder.append("\"boolean_field\":" + fieldValues.computeIfAbsent("boolean_field", v -> randomBoolean()) + ","); builder.append("\"ip_field\":\"" + fieldValues.computeIfAbsent("ip_field", v -> "123.123.123.123") + "\","); if (bwcVersion.onOrAfter(VERSION_FIELD_QL_INTRODUCTION)) { - columns.add(columnInfo("version_field", "2.11.4")); + builder.append( + "\"version_field\":\"" + + fieldValues.computeIfAbsent("version_field", v -> randomInt() + "." + randomInt() + "." + randomInt()) + + "\"," + ); } builder.append("\"text_field\": \"" + fieldValues.computeIfAbsent("text_field", v -> randomAlphaOfLength(5)) + "\","); builder.append("\"keyword_field\": \"" + fieldValues.computeIfAbsent("keyword_field", v -> randomAlphaOfLength(5)) + "\","); diff --git a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryRequest.java b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryRequest.java index c89d1f6d3a03d..ee2902992deb1 100644 --- a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryRequest.java +++ b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryRequest.java @@ -325,7 +325,7 @@ public boolean equals(Object obj) { return super.equals(obj) && fieldMultiValueLeniency == ((SqlQueryRequest) obj).fieldMultiValueLeniency && indexIncludeFrozen == ((SqlQueryRequest) obj).indexIncludeFrozen - && binaryCommunication == ((SqlQueryRequest) obj).binaryCommunication + && Objects.equals(binaryCommunication, ((SqlQueryRequest) obj).binaryCommunication) && keepOnCompletion == ((SqlQueryRequest) obj).keepOnCompletion && allowPartialSearchResults == ((SqlQueryRequest) obj).allowPartialSearchResults && Objects.equals(cursor, ((SqlQueryRequest) obj).cursor) diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduledTaskQueue.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduledTaskQueue.java index 76c957a61a940..e11da6af1c285 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduledTaskQueue.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduledTaskQueue.java @@ -44,16 +44,14 @@ class TransformScheduledTaskQueue { } /** - * @return whether the queue is empty. - */ - public synchronized boolean isEmpty() { - return tasks.isEmpty(); - } - - /** - * @return the task with the *lowest* priority. + * @return the task with the *lowest* priority or null if the queue is empty. 
*/ public synchronized TransformScheduledTask first() { + // gh#88991 concurrent access: the empty check must run within the synchronized context + if (tasks.isEmpty()) { + return null; + } + return tasks.first(); } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduler.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduler.java index 20c3f9b3d0e15..59b77af08aa75 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduler.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduler.java @@ -129,12 +129,14 @@ void processScheduledTasks() { } private boolean processScheduledTasksInternal() { - if (scheduledTasks.isEmpty()) { + TransformScheduledTask scheduledTask = scheduledTasks.first(); + + if (scheduledTask == null) { // There are no scheduled tasks, hence, nothing to do return false; } long currentTimeMillis = clock.millis(); - TransformScheduledTask scheduledTask = scheduledTasks.first(); + // Check if the task is eligible for processing if (currentTimeMillis < scheduledTask.getNextScheduledTimeMillis()) { // It is too early to process this task. diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduledTaskQueueTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduledTaskQueueTests.java index 42bfa7250c425..4748189745f1b 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduledTaskQueueTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduledTaskQueueTests.java @@ -17,7 +17,6 @@ import java.util.ArrayList; import java.util.HashSet; import java.util.List; -import java.util.NoSuchElementException; import java.util.Set; import java.util.concurrent.Callable; import java.util.concurrent.Future; @@ -27,7 +26,8 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; -import static org.junit.Assert.fail; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; public class TransformScheduledTaskQueueTests extends ESTestCase { @@ -52,14 +52,14 @@ public void testEmptyQueue() { public void testNonEmptyQueue() { queue.add(createTask("task-1", 5)); - assertThat(queue.isEmpty(), is(false)); + assertThat(queue.first(), is(notNullValue())); } public void testAddAndRemove() { queue.add(createTask("task-1", 5)); queue.add(createTask("task-2", 1)); queue.add(createTask("task-3", 9)); - assertThat(queue.isEmpty(), is(false)); + assertThat(queue.first(), is(notNullValue())); assertThat(queue.getTransformIds(), containsInAnyOrder("task-1", "task-2", "task-3")); assertThat(queue.first(), is(equalTo(createTask("task-2", 1)))); @@ -83,7 +83,7 @@ public void testConcurrentAddAndRemove() throws Exception { assertThat(taskAdded, is(true)); } } - assertThat(queue.isEmpty(), is(false)); + assertThat(queue.first(), is(notNullValue())); assertThat(queue.getTransformIds(), hasSize(100)); { @@ -117,7 +117,7 @@ public void testRemoveNoOp() { queue.add(createTask("task-1", 5)); queue.remove("task-non-existent"); // Verify that the remove operation had no effect - 
assertThat(queue.isEmpty(), is(false)); + assertThat(queue.first(), is(notNullValue())); assertThat(queue.getTransformIds(), containsInAnyOrder("task-1")); assertThat(queue.first(), is(equalTo(createTask("task-1", 5)))); } @@ -126,7 +126,7 @@ public void testUpdateNoOp() { queue.add(createTask("task-1", 5)); queue.update("task-non-existent", task -> createTask(task.getTransformId(), -999)); // Verify that the update operation had no effect - assertThat(queue.isEmpty(), is(false)); + assertThat(queue.first(), is(notNullValue())); assertThat(queue.getTransformIds(), containsInAnyOrder("task-1")); assertThat(queue.first(), is(equalTo(createTask("task-1", 5)))); } @@ -147,7 +147,7 @@ public void testRemoveAll() { queue.add(createTask("task-7", 0)); queue.add(createTask("task-8", 2)); queue.add(createTask("task-9", 4)); - assertThat(queue.isEmpty(), is(false)); + assertThat(queue.first(), is(notNullValue())); assertThat( queue.getTransformIds(), containsInAnyOrder("task-1", "task-2", "task-3", "task-4", "task-5", "task-6", "task-7", "task-8", "task-9") @@ -155,7 +155,7 @@ public void testRemoveAll() { assertThat(queue.first(), is(equalTo(createTask("task-7", 0)))); List tasksByPriority = new ArrayList<>(); - while (queue.isEmpty() == false) { + while (queue.first() != null) { TransformScheduledTask task = queue.first(); tasksByPriority.add(task); queue.remove(task.getTransformId()); @@ -210,8 +210,7 @@ private static void failUnexpectedCall(Event event) { } private void assertThatQueueIsEmpty() { - assertThat(queue.isEmpty(), is(true)); + assertThat(queue.first(), is(nullValue())); assertThat(queue.getTransformIds(), is(empty())); - expectThrows(NoSuchElementException.class, () -> queue.first()); } } diff --git a/x-pack/qa/kerberos-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosAuthenticationIT.java b/x-pack/qa/kerberos-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosAuthenticationIT.java index 997c92a9444d0..b3a0675e656c3 100644 --- a/x-pack/qa/kerberos-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosAuthenticationIT.java +++ b/x-pack/qa/kerberos-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosAuthenticationIT.java @@ -157,8 +157,22 @@ public void testGetOauth2TokenInExchangeForKerberosTickets() throws PrivilegedAc @SuppressForbidden(reason = "SPNEGO relies on hostnames and we need to ensure host isn't a IP address") protected HttpHost buildHttpHost(String host, int port) { try { - InetAddress inetAddress = InetAddress.getByName(host); - return super.buildHttpHost(inetAddress.getCanonicalHostName(), port); + final InetAddress address = InetAddress.getByName(host); + final String hostname = address.getCanonicalHostName(); + // InetAddress#getCanonicalHostName depends on the system configuration (e.g. /etc/hosts) to return the FQDN. + // In case InetAddress cannot resolve the FQDN it will return the textual representation of the IP address. + if (hostname.equals(address.getHostAddress())) { + if (address.isLoopbackAddress()) { + // Fall-back and return "localhost" for loopback address if it's not resolved. + // This is safe because InetAddress implements a reverse fall-back to loopback address + // in case the resolution of "localhost" hostname fails. 
+ return super.buildHttpHost("localhost", port); + } else { + throw new IllegalStateException("failed to resolve [" + host + "] to FQDN"); + } + } else { + return super.buildHttpHost(hostname, port); + } } catch (UnknownHostException e) { assumeNoException("failed to resolve host [" + host + "]", e); } diff --git a/x-pack/qa/oidc-op-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/oidc/C2IdOpTestCase.java b/x-pack/qa/oidc-op-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/oidc/C2IdOpTestCase.java index 476ebdb18925a..b9e519ccccd27 100644 --- a/x-pack/qa/oidc-op-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/oidc/C2IdOpTestCase.java +++ b/x-pack/qa/oidc-op-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/oidc/C2IdOpTestCase.java @@ -136,59 +136,30 @@ protected String authenticateAtOP(URI opAuthUri) throws Exception { assertThat(initResponse.getAsString("type"), equalTo("auth")); final String sid = initResponse.getAsString("sid"); // Actually authenticate the user with ldapAuth - HttpPost loginHttpPost = new HttpPost(C2ID_LOGIN_API + "authenticateSubject?cacheBuster=" + randomAlphaOfLength(8)); + HttpPost loginHttpPost = new HttpPost( + C2ID_LOGIN_API + "authenticateSubject?cacheBuster=" + randomAlphaOfLength(8) + "&authSessionId=" + sid + ); String loginJson = """ {"username":"alice","password":"secret"}"""; configureJsonRequest(loginHttpPost, loginJson); - JSONObject loginJsonResponse = execute(httpClient, loginHttpPost, context, response -> { + execute(httpClient, loginHttpPost, context, response -> { assertHttpOk(response.getStatusLine()); return parseJsonResponse(response); }); - // Get the consent screen - HttpPut consentFetchHttpPut = new HttpPut( + + HttpPut consentHttpPut = new HttpPut( C2ID_LOGIN_API + "updateAuthRequest" + "/" + sid + "?cacheBuster=" + randomAlphaOfLength(8) ); - String consentFetchJson = """ - { - "sub": "%s", - "acr": "http://loa.c2id.com/basic", - "amr": [ "pwd" ], - "data": { - "email": "%s", - "name": "%s" - } - }""".formatted( - loginJsonResponse.getAsString("id"), - loginJsonResponse.getAsString("email"), - loginJsonResponse.getAsString("name") - ); - configureJsonRequest(consentFetchHttpPut, consentFetchJson); - JSONObject consentFetchResponse = execute(httpClient, consentFetchHttpPut, context, response -> { + String consentJson = """ + {"claims":["name", "email"],"scope":["openid"]}"""; + configureJsonRequest(consentHttpPut, consentJson); + JSONObject jsonConsentResponse = execute(httpClient, consentHttpPut, context, response -> { assertHttpOk(response.getStatusLine()); return parseJsonResponse(response); }); - if (consentFetchResponse.getAsString("type").equals("consent")) { - // If needed, submit the consent - HttpPut consentHttpPut = new HttpPut( - C2ID_LOGIN_API + "updateAuthRequest" + "/" + sid + "?cacheBuster=" + randomAlphaOfLength(8) - ); - String consentJson = """ - {"claims":["name", "email"],"scope":["openid"]}"""; - configureJsonRequest(consentHttpPut, consentJson); - JSONObject jsonConsentResponse = execute(httpClient, consentHttpPut, context, response -> { - assertHttpOk(response.getStatusLine()); - return parseJsonResponse(response); - }); - assertThat(jsonConsentResponse.getAsString("type"), equalTo("response")); - JSONObject parameters = (JSONObject) jsonConsentResponse.get("parameters"); - return parameters.getAsString("uri"); - } else if (consentFetchResponse.getAsString("type").equals("response")) { - JSONObject parameters = (JSONObject) 
consentFetchResponse.get("parameters"); - return parameters.getAsString("uri"); - } else { - fail("Received an invalid response from the OP"); - return null; - } + assertThat(jsonConsentResponse.getAsString("type"), equalTo("response")); + JSONObject parameters = (JSONObject) jsonConsentResponse.get("parameters"); + return parameters.getAsString("uri"); } } diff --git a/x-pack/qa/repository-old-versions/build.gradle b/x-pack/qa/repository-old-versions/build.gradle index 5d9ca61ca31ed..3bebfdc151810 100644 --- a/x-pack/qa/repository-old-versions/build.gradle +++ b/x-pack/qa/repository-old-versions/build.gradle @@ -9,6 +9,7 @@ import org.apache.tools.ant.taskdefs.condition.Os import org.elasticsearch.gradle.Architecture import org.elasticsearch.gradle.OS import org.elasticsearch.gradle.Version +import org.elasticsearch.gradle.internal.BwcVersions import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.internal.test.AntFixture import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask @@ -114,8 +115,8 @@ if (Os.isFamily(Os.FAMILY_WINDOWS)) { false, "path.repo: ${repoLocation}", "path.data: ${dataPath}" - if (version.onOrAfter('6.8.0') && Architecture.current() == Architecture.AARCH64) { - // We need to explicitly disable ML when running old ES versions on ARM + if ((version.onOrAfter('6.8.0') && Architecture.current() == Architecture.AARCH64) || (version.onOrAfter("6.4.0") && BwcVersions.isMlCompatible(version) == false)) { + // We need to explicitly disable ML when running old ES versions on ARM or on systems with newer GLIBC args 'xpack.ml.enabled: false' } doFirst { diff --git a/x-pack/test/idp-fixture/docker-compose.yml b/x-pack/test/idp-fixture/docker-compose.yml index feccc6fae0061..753075c0e4506 100644 --- a/x-pack/test/idp-fixture/docker-compose.yml +++ b/x-pack/test/idp-fixture/docker-compose.yml @@ -162,7 +162,7 @@ services: - ./idp/shib-jetty-base/start.d/ssl.ini:/opt/shib-jetty-base/start.d/ssl.ini oidc-provider: - image: "c2id/c2id-server:9.5" + image: "c2id/c2id-server-demo:12.16.1" depends_on: - http-proxy ports: diff --git a/x-pack/test/idp-fixture/oidc/override.properties b/x-pack/test/idp-fixture/oidc/override.properties index fe4ba4a6e894f..5fd6f5f7ee2d7 100644 --- a/x-pack/test/idp-fixture/oidc/override.properties +++ b/x-pack/test/idp-fixture/oidc/override.properties @@ -2,3 +2,5 @@ op.issuer=http://oidc-provider:8080/c2id op.authz.endpoint=http://oidc-provider:8080/c2id-login/ op.reg.apiAccessTokenSHA256=d1c4fa70d9ee708d13cfa01daa0e060a05a2075a53c5cc1ad79e460e96ab5363 
jose.jwkSer=RnVsbCBrZXk6CnsKICAia2V5cyI6IFsKICAgIHsKICAgICAgInAiOiAiLXhhN2d2aW5tY3N3QXU3Vm1mV2loZ2o3U3gzUzhmd2dFSTdMZEVveW5FU1RzcElaeUY5aHc0NVhQZmI5VHlpbzZsOHZTS0F5RmU4T2lOalpkNE1Ra0ttYlJzTmxxR1Y5VlBoWF84UG1JSm5mcGVhb3E5YnZfU0k1blZHUl9zYUUzZE9sTEE2VWpaS0lsRVBNb0ZuRlZCMUFaUU9qQlhRRzZPTDg2eDZ2NHMwIiwKICAgICAgImt0eSI6ICJSU0EiLAogICAgICAicSI6ICJ2Q3pDQUlpdHV0MGx1V0djQloyLUFabURLc1RxNkkxcUp0RmlEYkIyZFBNQVlBNldOWTdaWEZoVWxsSjJrT2ZELWdlYjlkYkN2ODBxNEwyajVZSjZoOTBUc1NRWWVHRlljN1lZMGdCMU5VR3l5cXctb29QN0EtYlJmMGI3b3I4ajZJb0hzQTZKa2JranN6c3otbkJ2U2RmUURlZkRNSVc3Ni1ZWjN0c2hsY2MiLAogICAgICAiZCI6ICJtbFBOcm1zVVM5UmJtX1I5SElyeHdmeFYzZnJ2QzlaQktFZzRzc1ZZaThfY09lSjV2U1hyQV9laEtwa2g4QVhYaUdWUGpQbVlyd29xQzFVUksxUkZmLVg0dG10emV2OUVHaU12Z0JCaEF5RkdTSUd0VUNla2x4Q2dhb3BpMXdZSU1Bd0M0STZwMUtaZURxTVNCWVZGeHA5ZWlJZ2pwb05JbV9lR3hXUUs5VHNnYmk5T3lyc1VqaE9KLVczN2JVMEJWUU56UXpxODhCcGxmNzM3VmV1dy1FeDZaMk1iWXR3SWdfZ0JVb0JEZ0NrZkhoOVE4MElYcEZRV0x1RzgwenFrdkVwTHZ0RWxLbDRvQ3BHVnBjcmFUOFNsOGpYc3FDT1k0dnVRT19LRVUzS2VPNUNJbHd4eEhJYXZjQTE5cHFpSWJ5cm1LbThxS0ZEWHluUFJMSGFNZ1EiLAogICAgICAiZSI6ICJBUUFCIiwKICAgICAgImtpZCI6ICJyc2EzODRfMjA0OCIsCiAgICAgICJxaSI6ICJzMldTamVrVDl3S2JPbk9neGNoaDJPY3VubzE2Y20wS281Z3hoUWJTdVMyMldfUjJBR2ZVdkRieGF0cTRLakQ3THo3X1k2TjdTUkwzUVpudVhoZ1djeXgyNGhrUGppQUZLNmlkYVZKQzJqQmgycEZTUDVTNXZxZ0lsME12eWY4NjlwdkN4S0NzaGRKMGdlRWhveE93VkRPYXJqdTl2Zm9IQV90LWJoRlZrUnciLAogICAgICAiZHAiOiAiQlJhQTFqYVRydG9mTHZBSUJBYW1OSEVhSm51RU9zTVJJMFRCZXFuR1BNUm0tY2RjSG1OUVo5WUtqb2JpdXlmbnhGZ0piVDlSeElBRG0ySkpoZEp5RTN4Y1dTSzhmSjBSM1Jick1aT1dwako0QmJTVzFtU1VtRnlKTGxib3puRFhZR2RaZ1hzS0o1UkFrRUNQZFBCY3YwZVlkbk9NYWhfZndfaFZoNjRuZ2tFIiwKICAgICAgImFsZyI6ICJSU0EzODQiLAogICAgICAiZHEiOiAiUFJoVERKVlR3cDNXaDZfWFZrTjIwMUlpTWhxcElrUDN1UTYyUlRlTDNrQ2ZXSkNqMkZPLTRxcVRIQk0tQjZJWUVPLXpoVWZyQnhiMzJ1djNjS2JDWGFZN3BJSFJxQlFEQWQ2WGhHYzlwc0xqNThXd3VGY2RncERJYUFpRjNyc3NUMjJ4UFVvYkJFTVdBalV3bFJrNEtNTjItMnpLQk5FR3lIcDIzOUpKdnpVIiwKICAgICAgIm4iOiAidUpDWDVDbEZpM0JnTXBvOWhRSVZ2SDh0Vi1jLTVFdG5OeUZxVm91R3NlNWwyUG92MWJGb0tsRllsU25YTzNWUE9KRWR3azNDdl9VT0UtQzlqZERYRHpvS3Z4RURaTVM1TDZWMFpIVEJoNndIOV9iN3JHSlBxLV9RdlNkejczSzZxbHpGaUtQamRvdTF6VlFYTmZfblBZbnRnQkdNRUtBc1pRNGp0cWJCdE5lV0h0MF9UM001cEktTV9KNGVlRWpCTW95TkZuU2ExTEZDVmZRNl9YVnpjelp1TlRGMlh6UmdRWkFmcmJGRXZ6eXR1TzVMZTNTTXFrUUFJeDhFQmkwYXVlRUNqNEQ4cDNVNXFVRG92NEF2VnRJbUZlbFJvb1pBMHJtVW1KRHJ4WExrVkhuVUpzaUF6ZW9TLTNBSnV1bHJkMGpuNjJ5VjZHV2dFWklZMVNlZVd3IgogICAgfQogIF0KfQo +op.authz.alwaysPromptForConsent=true +op.authz.alwaysPromptForAuth=true
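
Illustrative note on the SqlQueryRequest#equals hunk above: binaryCommunication is a nullable Boolean, so reference comparison with == is unreliable. A minimal sketch of the null-safe pattern, using a hypothetical class and constructor rather than the actual request class:

    import java.util.Objects;

    final class RequestSketch {
        // Tri-state setting: Boolean.TRUE, Boolean.FALSE, or null when unset.
        private final Boolean binaryCommunication;

        RequestSketch(Boolean binaryCommunication) {
            this.binaryCommunication = binaryCommunication;
        }

        @Override
        public boolean equals(Object obj) {
            if (obj instanceof RequestSketch == false) {
                return false;
            }
            RequestSketch other = (RequestSketch) obj;
            // Objects.equals compares values and tolerates null; == on a Boolean
            // field compares references, which can report false for two distinct
            // boxed instances that hold the same value.
            return Objects.equals(binaryCommunication, other.binaryCommunication);
        }

        @Override
        public int hashCode() {
            return Objects.hashCode(binaryCommunication);
        }
    }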
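
Illustrative note on the TransformScheduledTaskQueue and TransformScheduler hunks above: with isEmpty() removed, first() returns null for an empty queue, so callers perform a single synchronized read instead of an isEmpty()-then-first() pair that can race. A minimal sketch of that contract, assuming hypothetical stand-in types (not the actual Elasticsearch classes):

    import java.util.Comparator;
    import java.util.TreeSet;

    class PriorityQueueSketch<T> {
        private final TreeSet<T> tasks;

        PriorityQueueSketch(Comparator<T> byPriority) {
            this.tasks = new TreeSet<>(byPriority);
        }

        // Single synchronized read: returns the lowest-priority element or null.
        // The empty check runs inside the same synchronized block as first(),
        // avoiding the check-then-act race of a separate isEmpty() call.
        synchronized T first() {
            return tasks.isEmpty() ? null : tasks.first();
        }

        synchronized boolean add(T task) {
            return tasks.add(task);
        }

        synchronized boolean remove(T task) {
            return tasks.remove(task);
        }
    }

    class CallerSketch {
        static <T> boolean processNext(PriorityQueueSketch<T> queue) {
            T next = queue.first(); // one atomic read instead of isEmpty() + first()
            if (next == null) {
                return false;       // nothing scheduled, nothing to do
            }
            // ... process "next" ...
            return true;
        }
    }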
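
Illustrative note on the KerberosAuthenticationIT hunk above: SPNEGO needs a hostname rather than an IP literal, and InetAddress#getCanonicalHostName returns the textual IP address when reverse resolution fails. A minimal sketch of the fall-back logic, with a hypothetical helper name:

    import java.net.InetAddress;
    import java.net.UnknownHostException;

    final class SpnegoHostResolver {
        static String resolveFqdn(String host) throws UnknownHostException {
            InetAddress address = InetAddress.getByName(host);
            String hostname = address.getCanonicalHostName();
            // getCanonicalHostName() falls back to the textual IP when the system
            // configuration (e.g. /etc/hosts) cannot provide an FQDN.
            if (hostname.equals(address.getHostAddress())) {
                if (address.isLoopbackAddress()) {
                    return "localhost"; // safe fall-back for loopback addresses
                }
                throw new IllegalStateException("failed to resolve [" + host + "] to FQDN");
            }
            return hostname;
        }
    }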