diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index ddd92c8d3d5..1e3c016fc6b 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -243,7 +243,6 @@ steps: manual: allowed: true - - wait: ~ continue_on_failure: true - label: "Processing test results" diff --git a/.buildkite/scripts/steps/integration_tests.sh b/.buildkite/scripts/steps/integration_tests.sh index 2a129193267..2aa71c18d3e 100755 --- a/.buildkite/scripts/steps/integration_tests.sh +++ b/.buildkite/scripts/steps/integration_tests.sh @@ -11,7 +11,7 @@ MAGE_SUBTARGET="${3:-""}" # Override the agent package version using a string with format .. # NOTE: use only after version bump when the new version is not yet available, for example: # OVERRIDE_AGENT_PACKAGE_VERSION="8.10.3" otherwise OVERRIDE_AGENT_PACKAGE_VERSION="". -OVERRIDE_AGENT_PACKAGE_VERSION="" +OVERRIDE_AGENT_PACKAGE_VERSION="8.12.0" if [[ -n "$OVERRIDE_AGENT_PACKAGE_VERSION" ]]; then OVERRIDE_TEST_AGENT_VERSION=${OVERRIDE_AGENT_PACKAGE_VERSION}"-SNAPSHOT" diff --git a/.ci/bump-golang.yml b/.ci/bump-golang.yml new file mode 100644 index 00000000000..470c6f4c8d5 --- /dev/null +++ b/.ci/bump-golang.yml @@ -0,0 +1,134 @@ +--- +name: Bump golang-version to latest version + +scms: + githubConfig: + kind: github + spec: + user: '{{ requiredEnv "GIT_USER" }}' + email: '{{ requiredEnv "GIT_EMAIL" }}' + owner: elastic + repository: elastic-agent + token: '{{ requiredEnv "GITHUB_TOKEN" }}' + username: '{{ requiredEnv "GIT_USER" }}' + branch: main + +actions: + elastic-agent: + kind: github/pullrequest + scmid: githubConfig + sourceid: latestGoVersion + spec: + automerge: false + labels: + - dependencies + - backport-skip + title: '[Automation] Bump Golang version to {{ source "latestGoVersion" }}' + description: | + It requires the version to be bumped first in golang-crossbuild project, then a new release will be added to: + https://github.com/elastic/golang-crossbuild/releases/tag/v{{ source "latestGoVersion" }}. 
+ Otherwise it will fail until the docker images are available. + +sources: + minor: + name: Get minor version in .go-version + kind: shell + transformers: + - findsubmatch: + pattern: '^\d+.(\d+).\d+$' + captureindex: 1 + spec: + command: cat .go-version + + latestGoVersion: + name: Get Latest Go Release + kind: githubrelease + dependson: + - minor + transformers: + - trimprefix: go + spec: + owner: golang + repository: go + token: '{{ requiredEnv "GITHUB_TOKEN" }}' + username: '{{ requiredEnv "GIT_USER" }}' + versionfilter: + kind: regex + pattern: go1\.{{ source "minor" }}\.(\d*)$ + + gomod: + dependson: + - latestGoVersion + name: Get version in go.mod format + kind: shell + transformers: + - findsubmatch: + pattern: '^(\d+.\d+).\d+' + captureindex: 1 + spec: + command: echo {{ source "latestGoVersion" }} + +conditions: + dockerTag: + name: Is docker image golang:{{ source "latestGoVersion" }} published + kind: dockerimage + spec: + image: golang + tag: '{{ source "latestGoVersion" }}' + sourceid: latestGoVersion + + goDefaultVersion-check: + name: Check if defined golang version differs + kind: shell + sourceid: latestGoVersion + spec: + command: 'grep -v -q {{ source "latestGoVersion" }} .go-version #' + +targets: + update-go-version: + name: "Update .go-version" + sourceid: latestGoVersion + scmid: githubConfig + kind: file + spec: + content: '{{ source "latestGoVersion" }}' + file: .go-version + matchpattern: '\d+.\d+.\d+' + update-golang.ci: + name: "Update .golangci.yml" + sourceid: latestGoVersion + scmid: githubConfig + kind: file + spec: + content: '{{ source "latestGoVersion" }}' + file: .golangci.yml + matchpattern: '\d+.\d+.\d+' + update-version.asciidoc: + name: "Update version.asciidoc" + sourceid: latestGoVersion + scmid: githubConfig + kind: file + spec: + content: ':go-version: {{ source "latestGoVersion" }}' + file: version/docs/version.asciidoc + matchpattern: ':go-version: \d+.\d+.\d+' + update-dockerfiles: + name: "Update from dockerfiles" 
+ sourceid: latestGoVersion + scmid: githubConfig + kind: file + spec: + content: 'ARG GO_VERSION={{ source "latestGoVersion" }}' + files: + - Dockerfile + - Dockerfile.skaffold + matchpattern: 'ARG GO_VERSION=\d+.\d+.\d+' + update-gomod: + name: "Update go.mod" + sourceid: gomod + scmid: githubConfig + kind: file + spec: + content: 'go {{ source "gomod" }}' + file: go.mod + matchpattern: 'go \d+.\d+' diff --git a/.ci/scripts/install-go.bat b/.ci/scripts/install-go.bat new file mode 100755 index 00000000000..29448bd4f63 --- /dev/null +++ b/.ci/scripts/install-go.bat @@ -0,0 +1,57 @@ +set GOPATH=%WORKSPACE% +set MAGEFILE_CACHE=%WORKSPACE%\.magefile + +set PATH=%WORKSPACE%\bin;C:\ProgramData\chocolatey\bin;%PATH% + +curl --version >nul 2>&1 && ( + echo found curl +) || ( + choco install curl -y --no-progress --skipdownloadcache +) + +mkdir %WORKSPACE%\bin + +IF EXIST "%PROGRAMFILES(X86)%" ( + REM Force the gvm installation. + SET GVM_BIN=gvm.exe + curl -L -o %WORKSPACE%\bin\gvm.exe https://github.com/andrewkroh/gvm/releases/download/v0.3.0/gvm-windows-amd64.exe + IF ERRORLEVEL 1 ( + REM gvm installation has failed. + del bin\gvm.exe /s /f /q + exit /b 1 + ) +) ELSE ( + REM Windows 7 workers got a broken gvm installation. + curl -L -o %WORKSPACE%\bin\gvm.exe https://github.com/andrewkroh/gvm/releases/download/v0.3.0/gvm-windows-386.exe + IF ERRORLEVEL 1 ( + REM gvm installation has failed. + del bin\gvm.exe /s /f /q + exit /b 1 + ) +) + +SET GVM_BIN=gvm.exe +WHERE /q %GVM_BIN% +%GVM_BIN% version + +REM Install the given go version +%GVM_BIN% --debug install %GO_VERSION% + +REM Configure the given go version +FOR /f "tokens=*" %%i IN ('"%GVM_BIN%" use %GO_VERSION% --format=batch') DO %%i + +go env +IF ERRORLEVEL 1 ( + REM go is not configured correctly. 
+ rmdir %WORKSPACE%\.gvm /s /q + exit /b 1 +) + +where mage +mage -version +IF ERRORLEVEL 1 ( + go get github.com/magefile/mage + IF ERRORLEVEL 1 ( + exit /b 1 + ) +) diff --git a/.ci/scripts/install-go.sh b/.ci/scripts/install-go.sh new file mode 100755 index 00000000000..31566c08726 --- /dev/null +++ b/.ci/scripts/install-go.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +set -exuo pipefail + +MSG="environment variable missing" +GO_VERSION=${GO_VERSION:?$MSG} +PROPERTIES_FILE=${PROPERTIES_FILE:-"go_env.properties"} +HOME=${HOME:?$MSG} +OS=$(uname -s| tr '[:upper:]' '[:lower:]') +ARCH=$(uname -m| tr '[:upper:]' '[:lower:]') +GVM_CMD="${HOME}/bin/gvm" + +if command -v go +then + set +e + echo "Found Go. Checking version.." + FOUND_GO_VERSION=$(go version|awk '{print $3}'|sed s/go//) + if [ "$FOUND_GO_VERSION" == "$GO_VERSION" ] + then + echo "Versions match. No need to install Go. Exiting." + exit 0 + fi + set -e +fi + +if [ "${ARCH}" == "aarch64" ] ; then + GVM_ARCH_SUFFIX=arm64 +elif [ "${ARCH}" == "x86_64" ] ; then + GVM_ARCH_SUFFIX=amd64 +elif [ "${ARCH}" == "i686" ] ; then + GVM_ARCH_SUFFIX=386 +else + GVM_ARCH_SUFFIX=arm +fi + +echo "UNMET DEP: Installing Go" +mkdir -p "${HOME}/bin" + +curl -sSLo "${GVM_CMD}" "https://github.com/andrewkroh/gvm/releases/download/v0.3.0/gvm-${OS}-${GVM_ARCH_SUFFIX}" +chmod +x "${GVM_CMD}" + +${GVM_CMD} "${GO_VERSION}" |cut -d ' ' -f 2|tr -d '\"' > ${PROPERTIES_FILE} + +eval "$("${GVM_CMD}" "${GO_VERSION}")" diff --git a/.mergify.yml b/.mergify.yml index 718c46d7093..d7e15073f47 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -280,3 +280,16 @@ pull_request_rules: labels: - "backport" title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}" + - name: backport patches to 8.12 branch + conditions: + - merged + - label=backport-v8.12.0 + actions: + backport: + assignees: + - "{{ author }}" + branches: + - "8.12" + labels: + - "backport" + title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}" diff --git 
a/Makefile b/Makefile index 25d614471c6..13aab51123b 100644 --- a/Makefile +++ b/Makefile @@ -29,17 +29,7 @@ help: Makefile ## notice : Generates the NOTICE file. .PHONY: notice notice: - @echo "Generating NOTICE" - go mod tidy - go mod download - go list -m -json all | go run go.elastic.co/go-licence-detector \ - -includeIndirect \ - -rules dev-tools/notice/rules.json \ - -overrides dev-tools/notice/overrides.json \ - -noticeTemplate dev-tools/notice/NOTICE.txt.tmpl \ - -noticeOut NOTICE.txt \ - -depsOut "" - cat dev-tools/notice/NOTICE.txt.append >> NOTICE.txt + @mage notice ## check-ci: Run all the checks under the ci, this doesn't include the linter which is run via a github action. .PHONY: check-ci diff --git a/NOTICE.txt b/NOTICE.txt index 70c8ccb2fc1..f65f5b6cb86 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -1377,11 +1377,11 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-l -------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-system-metrics -Version: v0.8.1 +Version: v0.9.1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-system-metrics@v0.8.1/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-system-metrics@v0.9.1/LICENSE.txt: Apache License Version 2.0, January 2004 @@ -2222,11 +2222,11 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/go-licenser@v0. 
-------------------------------------------------------------------------------- Dependency : github.com/elastic/go-sysinfo -Version: v1.11.1 +Version: v1.11.2 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/go-sysinfo@v1.11.1/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-sysinfo@v1.11.2/LICENSE.txt: Apache License diff --git a/_meta/config/common.p2.yml.tmpl b/_meta/config/common.p2.yml.tmpl index f7c82213305..7ca36f155a1 100644 --- a/_meta/config/common.p2.yml.tmpl +++ b/_meta/config/common.p2.yml.tmpl @@ -8,6 +8,7 @@ outputs: api_key: "example-key" #username: "elastic" #password: "changeme" + preset: balanced diff --git a/_meta/config/common.reference.p2.yml.tmpl b/_meta/config/common.reference.p2.yml.tmpl index e9643e6336b..325cd13ab18 100644 --- a/_meta/config/common.reference.p2.yml.tmpl +++ b/_meta/config/common.reference.p2.yml.tmpl @@ -9,6 +9,11 @@ outputs: # username: "elastic" # password: "changeme" + # Performance preset for elasticsearch outputs. One of "balanced", "throughput", + # "scale", "latency" and "custom". + # The default if unspecified is "custom". + preset: balanced + inputs: - type: system/metrics # Each input must have a unique ID. 
diff --git a/_meta/config/elastic-agent.docker.yml.tmpl b/_meta/config/elastic-agent.docker.yml.tmpl index 989c8c26ad9..134aecf3249 100644 --- a/_meta/config/elastic-agent.docker.yml.tmpl +++ b/_meta/config/elastic-agent.docker.yml.tmpl @@ -7,6 +7,7 @@ outputs: hosts: '${ELASTICSEARCH_HOSTS:http://elasticsearch:9200}' username: '${ELASTICSEARCH_USERNAME:elastic}' password: '${ELASTICSEARCH_PASSWORD:changeme}' + preset: balanced inputs: - type: system/metrics diff --git a/changelog/8.11.2.asciidoc b/changelog/8.11.2.asciidoc new file mode 100644 index 00000000000..7d86b8f2929 --- /dev/null +++ b/changelog/8.11.2.asciidoc @@ -0,0 +1,53 @@ +// begin 8.11.2 relnotes + +[[release-notes-8.11.2]] +== 8.11.2 + +Review important information about the 8.11.2 release. + +[discrete] +[[security-updates-8.11.2]] +=== Security updates + + +elastic-agent:: + +* Update Go Version To 1.20.11. {elastic-agent-pull}https://github.com/elastic/elastic-agent/pull/3748[#https://github.com/elastic/elastic-agent/pull/3748] + + + + + + + + + + +[discrete] +[[enhancements-8.11.2]] +=== Enhancements + + +elastic-agent:: + +* Add Configuration Parameters For The Kubernetes `Leader_election` Provider. {elastic-agent-pull}https://github.com/elastic/elastic-agent/pull/3625[#https://github.com/elastic/elastic-agent/pull/3625] + + + + +[discrete] +[[bug-fixes-8.11.2]] +=== Bug fixes + + +Upgrader:: + +* Fix Fleet-Managed Agent Ignoring The `Agent.download.proxy_url` Setting After A Policy Is Updated. {Upgrader-pull}https://github.com/elastic/elastic-agent/pull/3803[#https://github.com/elastic/elastic-agent/pull/3803] {Upgrader-issue}https://github.com/elastic/elastic-agent/issues/3560[#https://github.com/elastic/elastic-agent/issues/3560] +component:: + +* Custom-Yaml-Marshal-For-Component. 
{component-pull}https://github.com/elastic/elastic-agent/pull/3835[#https://github.com/elastic/elastic-agent/pull/3835] {component-issue}https://github.com/elastic/elastic-agent/issues/2940[#https://github.com/elastic/elastic-agent/issues/2940] +elastic-agent:: + +* Fix Logging Calls That Incorrectly Use Non-F Variants And Have Missing Arguments. {elastic-agent-pull}https://github.com/elastic/elastic-agent/pull/3679[#https://github.com/elastic/elastic-agent/pull/3679] + +// end 8.11.2 relnotes diff --git a/changelog/8.11.2.yaml b/changelog/8.11.2.yaml new file mode 100644 index 00000000000..10260258663 --- /dev/null +++ b/changelog/8.11.2.yaml @@ -0,0 +1,70 @@ +version: 8.11.2 +entries: + - kind: other + summary: hb-bump-node-18.18.x + description: Update NodeJS version bundled with Heartbeat to v18.18.2. + component: elastic-agent + pr: + - https://github.com/elastic/elastic-agent/pull/3655 + issue: [] + timestamp: 1698247934 + file: + name: 1698247934-hb-bump-node.yaml + checksum: b316b799a0edf0c6e9bf7db58f3dbcfad6710608 + - kind: bug-fix + summary: Fix logging calls that incorrectly use non-f variants and have missing arguments. + description: "" + component: elastic-agent + pr: + - https://github.com/elastic/elastic-agent/pull/3679 + issue: [] + timestamp: 1698808222 + file: + name: 1698808222-orphan-verbs.yaml + checksum: 12dc7541b29aae73966c462fff63c7107f9ebe35 + - kind: enhancement + summary: Add configuration parameters for the Kubernetes `leader_election` provider. + description: "" + component: elastic-agent + pr: + - https://github.com/elastic/elastic-agent/pull/3625 + issue: [] + timestamp: 1699010487 + file: + name: 1699010487-leaderelectionconfig.yaml + checksum: 95881c900b77e81aa14e28a188240f8c32bacfb9 + - kind: security + summary: Update Go version to 1.20.11. 
+ description: "" + component: elastic-agent + pr: + - https://github.com/elastic/elastic-agent/pull/3748 + issue: [] + timestamp: 1700000391 + file: + name: 1700000391-Upgrade-to-Go-1.20.11.yaml + checksum: 3a5ac3d69411fa1ca3872c27dead2b1683d36f03 + - kind: bug-fix + summary: Fix Fleet-managed Agent ignoring the `agent.download.proxy_url` setting after a policy is updated. + description: "" + component: Upgrader + pr: + - https://github.com/elastic/elastic-agent/pull/3803 + issue: + - https://github.com/elastic/elastic-agent/issues/3560 + timestamp: 1700678892 + file: + name: 1700678892-Fixes-the-Elastic-Agent-ignoring-agent.download.proxy_url-on-policy-update.yaml + checksum: 7d54a106a69fc6042ea997a99829b221cff3d4d6 + - kind: bug-fix + summary: custom-yaml-marshal-for-component + description: Create a custom `MarshalYAML()` method to properly handle error fields in agent diagnostics. + component: component + pr: + - https://github.com/elastic/elastic-agent/pull/3835 + issue: + - https://github.com/elastic/elastic-agent/issues/2940 + timestamp: 1701208315 + file: + name: 1701208315-custom-yaml-marshal-for-component.yaml + checksum: 6126221771bc26bd51566d33bd5873ffbe599fae diff --git a/changelog/8.11.3.asciidoc b/changelog/8.11.3.asciidoc new file mode 100644 index 00000000000..4a3c2bfea11 --- /dev/null +++ b/changelog/8.11.3.asciidoc @@ -0,0 +1,24 @@ +// begin 8.11.3 relnotes + +[[release-notes-8.11.3]] +== 8.11.3 + +Review important information about the 8.11.3 release. + + + + + + + + + + + + + + + + + +// end 8.11.3 relnotes diff --git a/changelog/8.11.3.yaml b/changelog/8.11.3.yaml new file mode 100644 index 00000000000..7152796d543 --- /dev/null +++ b/changelog/8.11.3.yaml @@ -0,0 +1,2 @@ +version: 8.11.3 +# No changes in this release. 
diff --git a/changelog/fragments/1700000391-Upgrade-to-Go-1.20.11.yaml b/changelog/fragments/1689328899-Elastic-Agent-container-runs-on-Azure-Container-Instances-.yaml similarity index 68% rename from changelog/fragments/1700000391-Upgrade-to-Go-1.20.11.yaml rename to changelog/fragments/1689328899-Elastic-Agent-container-runs-on-Azure-Container-Instances-.yaml index e7b4c74c851..df24e655971 100644 --- a/changelog/fragments/1700000391-Upgrade-to-Go-1.20.11.yaml +++ b/changelog/fragments/1689328899-Elastic-Agent-container-runs-on-Azure-Container-Instances-.yaml @@ -8,25 +8,24 @@ # - security: impacts on the security of a product or a user’s deployment. # - upgrade: important information for someone upgrading from a prior version # - other: does not fit into any of the other categories -kind: security +kind: bug # Change summary; a 80ish characters long description of the change. -summary: Upgrade to Go 1.20.11. +summary: Elastic-Agent container runs on Azure Container Instances # Long description; in case the summary is not enough to describe the change # this field accommodate a description without length limits. -# NOTE: This field will be rendered only for breaking-change and known-issue kinds at the moment. -#description: +#description: -# Affected component; usually one of "elastic-agent", "fleet-server", "filebeat", "metricbeat", "auditbeat", "all", etc. -component: "elastic-agent" +# Affected component; a word indicating the component this changeset affects. +component: elastic-agent -# PR URL; optional; the PR number that added the changeset. +# PR number; optional; the PR number that added the changeset. # If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. # NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. # Please provide it if you are adding a fragment for a different PR. 
-pr: https://github.com/elastic/elastic-agent/pull/3748 +pr: 3576 -# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of). +# Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). # If not present is automatically filled by the tooling with the issue linked to the PR number. -#issue: https://github.com/owner/repo/1234 +issue: 82 diff --git a/changelog/fragments/1698247934-hb-bump-node.yaml b/changelog/fragments/1698247934-hb-bump-node.yaml deleted file mode 100644 index 76bfb42d4ee..00000000000 --- a/changelog/fragments/1698247934-hb-bump-node.yaml +++ /dev/null @@ -1,32 +0,0 @@ -# Kind can be one of: -# - breaking-change: a change to previously-documented behavior -# - deprecation: functionality that is being removed in a later release -# - bug-fix: fixes a problem in a previous version -# - enhancement: extends functionality but does not break or fix existing behavior -# - feature: new functionality -# - known-issue: problems that we are aware of in a given version -# - security: impacts on the security of a product or a user’s deployment. -# - upgrade: important information for someone upgrading from a prior version -# - other: does not fit into any of the other categories -kind: other - -# Change summary; a 80ish characters long description of the change. -summary: hb-bump-node-18.18.x - -# Long description; in case the summary is not enough to describe the change -# this field accommodate a description without length limits. -# NOTE: This field will be rendered only for breaking-change and known-issue kinds at the moment. -description: Bump NodeJS version bundled with Heartbeat to v18.18.2. - -# Affected component; usually one of "elastic-agent", "fleet-server", "filebeat", "metricbeat", "auditbeat", "all", etc. -component: "elastic-agent" - -# PR URL; optional; the PR number that added the changeset. 
-# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. -# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. -# Please provide it if you are adding a fragment for a different PR. -pr: https://github.com/elastic/elastic-agent/pull/3655 - -# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of). -# If not present is automatically filled by the tooling with the issue linked to the PR number. -#issue: https://github.com/owner/repo/1234 diff --git a/changelog/fragments/1698808222-orphan-verbs.yaml b/changelog/fragments/1701091034-add-cache-for-secrets.yaml similarity index 89% rename from changelog/fragments/1698808222-orphan-verbs.yaml rename to changelog/fragments/1701091034-add-cache-for-secrets.yaml index a423b94619f..3c52bafa577 100644 --- a/changelog/fragments/1698808222-orphan-verbs.yaml +++ b/changelog/fragments/1701091034-add-cache-for-secrets.yaml @@ -8,10 +8,10 @@ # - security: impacts on the security of a product or a user’s deployment. # - upgrade: important information for someone upgrading from a prior version # - other: does not fit into any of the other categories -kind: bug-fix +kind: feature # Change summary; a 80ish characters long description of the change. -summary: Fix logging calls incorrectly using non-f variants and missing args. +summary: add cache for secrets when using kubernetes secret provider # Long description; in case the summary is not enough to describe the change # this field accommodate a description without length limits. @@ -25,8 +25,8 @@ component: elastic-agent # If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. # NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. # Please provide it if you are adding a fragment for a different PR. 
-#pr: https://github.com/owner/repo/1234 +pr: https://github.com/elastic/elastic-agent/pull/3822 # Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of). # If not present is automatically filled by the tooling with the issue linked to the PR number. -#issue: https://github.com/owner/repo/1234 +issue: https://github.com/elastic/elastic-agent/issues/3594 diff --git a/changelog/fragments/1699010487-leaderelectionconfig.yaml b/changelog/fragments/1701293846-upgrade-details-downloading.yaml similarity index 89% rename from changelog/fragments/1699010487-leaderelectionconfig.yaml rename to changelog/fragments/1701293846-upgrade-details-downloading.yaml index e965af360df..da08b38439a 100644 --- a/changelog/fragments/1699010487-leaderelectionconfig.yaml +++ b/changelog/fragments/1701293846-upgrade-details-downloading.yaml @@ -8,10 +8,10 @@ # - security: impacts on the security of a product or a user’s deployment. # - upgrade: important information for someone upgrading from a prior version # - other: does not fit into any of the other categories -kind: enhancement +kind: feature # Change summary; a 80ish characters long description of the change. -summary: Added Kubernetes leader_election provider configuration parameters +summary: Add metadata for retryable upgrade steps to upgrade details # Long description; in case the summary is not enough to describe the change # this field accommodate a description without length limits. @@ -25,8 +25,8 @@ component: elastic-agent # If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. # NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. # Please provide it if you are adding a fragment for a different PR. 
-#pr: https://github.com/owner/repo/1234 +pr: https://github.com/elastic/elastic-agent/pull/3845 # Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of). # If not present is automatically filled by the tooling with the issue linked to the PR number. -#issue: https://github.com/owner/repo/1234 +issue: https://github.com/elastic/elastic-agent/issues/3818 diff --git a/changelog/fragments/1700678892-Fixes-the-Elastic-Agent-ignoring-agent.download.proxy_url-on-policy-update.yaml b/changelog/fragments/1701445320-preserve-build-metadata-in-upgrade-version-strings.yaml similarity index 90% rename from changelog/fragments/1700678892-Fixes-the-Elastic-Agent-ignoring-agent.download.proxy_url-on-policy-update.yaml rename to changelog/fragments/1701445320-preserve-build-metadata-in-upgrade-version-strings.yaml index cc09836a0b5..e8d9284e7a5 100644 --- a/changelog/fragments/1700678892-Fixes-the-Elastic-Agent-ignoring-agent.download.proxy_url-on-policy-update.yaml +++ b/changelog/fragments/1701445320-preserve-build-metadata-in-upgrade-version-strings.yaml @@ -11,7 +11,7 @@ kind: bug-fix # Change summary; a 80ish characters long description of the change. -summary: Fixes the Elastic Agent ignoring agent.download.proxy_url on policy update +summary: Preserve build metadata in upgrade version strings # Long description; in case the summary is not enough to describe the change # this field accommodate a description without length limits. @@ -19,7 +19,7 @@ summary: Fixes the Elastic Agent ignoring agent.download.proxy_url on policy upd #description: # Affected component; a word indicating the component this changeset affects. -component: Upgrader +component: agent # PR URL; optional; the PR number that added the changeset. # If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. 
@@ -29,4 +29,4 @@ component: Upgrader # Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of). # If not present is automatically filled by the tooling with the issue linked to the PR number. -issue: https://github.com/elastic/elastic-agent/issues/3560 +#issue: https://github.com/owner/repo/1234 diff --git a/changelog/fragments/1701882851-performance-presets.yaml b/changelog/fragments/1701882851-performance-presets.yaml new file mode 100644 index 00000000000..54ea276a7fb --- /dev/null +++ b/changelog/fragments/1701882851-performance-presets.yaml @@ -0,0 +1,8 @@ +kind: feature +summary: Add performance presets to the Elasticsearch output configuration. + +description: Add a "preset" field to Elasticsearch output configurations that applies a set of configuration overrides based on a desired performance priority. Valid values are "balanced", "throughput", "scale", "latency" and "custom". The default if unspecified is "custom". + +component: all +pr: https://github.com/elastic/beats/pull/37259 +issue: https://github.com/elastic/elastic-agent/issues/3797 diff --git a/control_v2.proto b/control_v2.proto index a35477f81cf..de0785ada26 100644 --- a/control_v2.proto +++ b/control_v2.proto @@ -213,7 +213,7 @@ message UpgradeDetails { message UpgradeDetailsMetadata { // If the upgrade is a scheduled upgrade, the timestamp of when the // upgrade is expected to start. - google.protobuf.Timestamp scheduled_at = 1; + string scheduled_at = 1; // If the upgrade is in the UPG_DOWNLOADING state, the percentage of // the Elastic Agent artifact that has already been downloaded, to @@ -225,6 +225,14 @@ message UpgradeDetailsMetadata { // Any error encountered during the upgrade process. string error_msg = 4; + + // Any error message that is a result of a retryable upgrade + // step, e.g. the download step, being retried. + string retry_error_msg = 5; + + // The deadline until when a retryable upgrade step, e.g. 
the download + // step, will be retried. + string retry_until = 6; } // DiagnosticFileResult is a file result from a diagnostic result. diff --git a/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml b/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml index 0b9526f287d..21c172ffa53 100644 --- a/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml +++ b/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml @@ -30,7 +30,7 @@ spec: dnsPolicy: ClusterFirstWithHostNet containers: - name: elastic-agent - image: docker.elastic.co/beats/elastic-agent:8.12.0 + image: docker.elastic.co/beats/elastic-agent:8.13.0 env: # Set to 1 for enrollment into Fleet server. If not set, Elastic Agent is run in standalone mode - name: FLEET_ENROLL diff --git a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml index 1aee9d7a74c..8de0a507caf 100644 --- a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml +++ b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml @@ -698,13 +698,13 @@ spec: # - -c # - >- # mkdir -p /etc/elastic-agent/inputs.d && - # wget -O - https://github.com/elastic/elastic-agent/archive/8.12.tar.gz | tar xz -C /etc/elastic-agent/inputs.d --strip=5 "elastic-agent-8.12/deploy/kubernetes/elastic-agent-standalone/templates.d" + # wget -O - https://github.com/elastic/elastic-agent/archive/8.13.tar.gz | tar xz -C /etc/elastic-agent/inputs.d --strip=5 "elastic-agent-8.13/deploy/kubernetes/elastic-agent-standalone/templates.d" # volumeMounts: # - name: external-inputs # mountPath: /etc/elastic-agent/inputs.d containers: - name: elastic-agent-standalone - image: docker.elastic.co/beats/elastic-agent:8.12.0 + image: docker.elastic.co/beats/elastic-agent:8.13.0 args: ["-c", "/etc/elastic-agent/agent.yml", "-e"] env: # The basic authentication username used to connect to Elasticsearch diff --git a/dev-tools/mage/target/common/notice.go b/dev-tools/mage/target/common/notice.go new file 
mode 100644 index 00000000000..ecfcf45aa3c --- /dev/null +++ b/dev-tools/mage/target/common/notice.go @@ -0,0 +1,119 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package common + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "os/exec" + "strings" + + "github.com/magefile/mage/sh" +) + +func runCommand(cmd string, args ...string) error { + s := strings.Join(append([]string{cmd}, args...), " ") + fmt.Printf(">> %s\n", s) + err := sh.Run(cmd, args...) + if err != nil { + return fmt.Errorf("failed running %s, please fix the issues reported: %w", s, err) + } + return nil +} + +// Notice Generates NOTICE.txt. +func Notice() (err error) { + fmt.Println("Generating NOTICE") + if err := runCommand("go", "mod", "tidy"); err != nil { + return err + } + if err := runCommand("go", "mod", "download"); err != nil { + return err + } + + // piping output of the first command to the second + // similar to former Makefile implementation + // + // go list -m -json all | go run go.elastic.co/go-licence-detector \ + // -includeIndirect \ + // -rules dev-tools/notice/rules.json \ + // -overrides dev-tools/notice/overrides.json \ + // -noticeTemplate dev-tools/notice/NOTICE.txt.tmpl \ + // -noticeOut NOTICE.txt \ + // -depsOut "" + listCmd := exec.Command("go", "list", "-m", "-json", "all") + licDetectCmd := exec.Command("go", "run", "go.elastic.co/go-licence-detector", + "-includeIndirect", + "-rules", "dev-tools/notice/rules.json", + "-overrides", "dev-tools/notice/overrides.json", + "-noticeTemplate", "dev-tools/notice/NOTICE.txt.tmpl", + "-noticeOut", "NOTICE.txt", + "-depsOut", "") + + fmt.Printf(">> %s | %s\n", strings.Join(listCmd.Args, " "), strings.Join(licDetectCmd.Args, " ")) + + r, w := io.Pipe() + defer r.Close() + defer w.Close() + + var buf bytes.Buffer + listCmd.Stdout 
= w + licDetectCmd.Stdin = r + licDetectCmd.Stderr = &buf + + if err := listCmd.Start(); err != nil { + return err + } + if err := licDetectCmd.Start(); err != nil { + return err + } + + if err := listCmd.Wait(); err != nil { + return err + } + w.Close() + + if err := licDetectCmd.Wait(); err != nil { + // copy error to stdout, helpful if tool failed + if _, cerr := io.Copy(os.Stdout, &buf); cerr != nil { + return errors.Join(fmt.Errorf("failed to read stderr: %w", cerr), err) + } + return err + } + + // cat dev-tools/notice/NOTICE.txt.append >> NOTICE.txt + fmt.Printf(">> %s\n", "cat dev-tools/notice/NOTICE.txt.append >> NOTICE.txt") + const ( + infn = "dev-tools/notice/NOTICE.txt.append" + outfn = "NOTICE.txt" + ) + + f, err := os.Open(infn) + if err != nil { + return fmt.Errorf("failed to open file %s: %w", infn, err) + } + defer f.Close() + + out, err := os.OpenFile(outfn, os.O_WRONLY|os.O_APPEND, 0644) + if err != nil { + return fmt.Errorf("failed to open file %s: %w", outfn, err) + } + + defer func() { + cerr := out.Close() + if err == nil { + err = cerr + } + }() + + if _, err := io.Copy(out, f); err != nil { + return fmt.Errorf("failed to append file %s: %w", outfn, err) + } + + return nil +} diff --git a/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl b/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl index 1a89be1eaca..9d659fe9cd7 100644 --- a/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl +++ b/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl @@ -8,12 +8,14 @@ FROM {{ .buildFrom }} AS home COPY beat {{ $beatHome }} -RUN mkdir -p {{ $beatHome }}/data {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/logs && \ - chown -R root:root {{ $beatHome }} && \ +RUN true && \ + # ECE needs to create config here under non-1000 user + chmod 0777 {{ $beatHome}} && \ + mkdir -p {{ $beatHome }}/data {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/logs && \ find {{ $beatHome }} -type d 
-exec chmod 0755 {} \; && \ find {{ $beatHome }} -type f -exec chmod 0644 {} \; && \ - find {{ $beatHome }}/data -type d -exec chmod 0770 {} \; && \ - find {{ $beatHome }}/data -type f -exec chmod 0660 {} \; && \ + find {{ $beatHome }}/data -type d -exec chmod 0777 {} \; && \ + find {{ $beatHome }}/data -type f -exec chmod 0666 {} \; && \ rm {{ $beatBinary }} && \ ln -s {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/elastic-agent {{ $beatBinary }} && \ chmod 0755 {{ $beatHome }}/data/elastic-agent-*/elastic-agent && \ @@ -27,7 +29,6 @@ RUN mkdir -p {{ $beatHome }}/data {{ $beatHome }}/data/elastic-agent-{{ commit_s (chmod 0755 {{ $beatHome }}/data/elastic-agent-*/components/pf-elastic-collector || true) && \ (chmod 0755 {{ $beatHome }}/data/elastic-agent-*/components/pf-elastic-symbolizer || true) && \ (chmod 0755 {{ $beatHome }}/data/elastic-agent-*/components/pf-host-agent || true) && \ - find {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/components -name "*.yml*" -type f -exec chown root:root {} \; && \ find {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/components -name "*.yml*" -type f -exec chmod 0644 {} \; && \ {{- range $i, $modulesd := .ModulesDirs }} chmod 0775 {{ $beatHome}}/{{ $modulesd }} && \ @@ -111,13 +112,19 @@ RUN set -e ; \ chmod +x /usr/bin/tini COPY docker-entrypoint /usr/local/bin/docker-entrypoint -RUN chmod 755 /usr/local/bin/docker-entrypoint +RUN groupadd --gid 1000 {{ .BeatName }} && \ + useradd -M --uid 1000 --gid 1000 --groups 0 {{ .user }} && \ + chmod 755 /usr/local/bin/docker-entrypoint && \ + true -COPY --from=home {{ $beatHome }} {{ $beatHome }} +COPY --chown={{ .user }}:{{ .user }} --from=home {{ $beatHome }} {{ $beatHome }} # Elastic Agent needs group permissions in the home itself to be able to # create fleet.yml when running as non-root. 
-RUN chmod 0770 {{ $beatHome }} +RUN chmod 0777 {{ $beatHome }} && \ + usermod -d {{ $beatHome}} {{ .user }} && \ + find {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/components -name "*.yml*" -type f -exec chown root:root {} \; && \ + true RUN mkdir /licenses COPY --from=home {{ $beatHome }}/LICENSE.txt /licenses @@ -127,33 +134,23 @@ COPY --from=home {{ $beatHome }}/NOTICE.txt /licenses COPY --from=home /opt /opt {{- end }} +{{- if contains .image_name "-cloud" }} +# Generate folder for a stub command that will be overwritten at runtime +RUN mkdir /app && \ + chown {{ .user }}:{{ .user }} /app +{{- end }} +# Keep this after any chown command, chown resets any applied capabilities RUN setcap cap_net_raw,cap_setuid+p {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/components/heartbeat && \ {{- if .linux_capabilities }} # Since the beat is stored at the other end of a symlink we must follow the symlink first # For security reasons setcap does not support symlinks. This is smart in the general case # but in our specific case since we're building a trusted image from trusted binaries this is # fine. 
Thus, we use readlink to follow the link and setcap on the actual binary - readlink -f {{ $beatBinary }} | xargs setcap {{ .linux_capabilities }} && \ + setcap {{ .linux_capabilities }} $(readlink -f {{ $beatBinary }}) && \ {{- end }} true -{{- if eq .user "root" }} -{{- if contains .image_name "-cloud" }} -# Generate folder for a stub command that will be overwritten at runtime -RUN mkdir /app -{{- end }} -{{- else }} -RUN groupadd --gid 1000 {{ .BeatName }} -RUN useradd -M --uid 1000 --gid 1000 --groups 0 --home {{ $beatHome }} {{ .user }} - -{{- if contains .image_name "-cloud" }} -# Generate folder for a stub command that will be overwritten at runtime -RUN mkdir /app -RUN chown {{ .user }} /app -{{- end }} -{{- end }} - {{- if (and (contains .image_name "-complete") (not (contains .from "ubi-minimal"))) }} USER root ENV NODE_PATH={{ $beatHome }}/.node @@ -163,7 +160,7 @@ RUN echo \ {{ $beatHome }}/.synthetics \ {{ $beatHome }}/.npm \ {{ $beatHome }}/.cache \ - | xargs -IDIR sh -c 'mkdir -p DIR && chmod 0770 DIR' + | xargs -IDIR sh -c 'mkdir -p DIR && chmod 0775 DIR' # Setup synthetics env vars ENV ELASTIC_SYNTHETICS_CAPABLE=true @@ -192,14 +189,14 @@ RUN cd {{$beatHome}}/.node \ esac \ && mkdir -p node \ && curl ${NODE_DOWNLOAD_URL} | tar -xJ --strip 1 -C node \ - && chmod ug+rwX -R $NODE_PATH - + && chmod ugo+rwX -R $NODE_PATH \ # Install synthetics as a regular user, installing npm deps as root odesn't work -RUN chown -R {{ .user }} $NODE_PATH + # fix .node .npm and .synthetics + && chown -R {{ .user }}:{{ .user }} $NODE_PATH USER {{ .user }} # If this fails dump the NPM logs -RUN npm i -g --loglevel verbose --engine-strict @elastic/synthetics@stack_release || sh -c 'tail -n +1 /root/.npm/_logs/* && exit 1' -RUN chmod ug+rwX -R $NODE_PATH +RUN (npm i -g --loglevel verbose --engine-strict @elastic/synthetics@stack_release || sh -c 'tail -n +1 /root/.npm/_logs/* && exit 1') && \ + chmod ugo+rwX -R $NODE_PATH USER root # Install the deps as needed by the exact 
version of playwright elastic synthetics uses @@ -223,6 +220,7 @@ USER {{ .user }} EXPOSE {{ $port }} {{- end }} + # When running under Docker, we must ensure libbeat monitoring pulls cgroup # metrics from /sys/fs/cgroup//, ignoring any paths found in # /proc/self/cgroup. diff --git a/dev-tools/packaging/templates/ironbank/Dockerfile.tmpl b/dev-tools/packaging/templates/ironbank/Dockerfile.tmpl index 58d05bab9e0..26838f70b3a 100644 --- a/dev-tools/packaging/templates/ironbank/Dockerfile.tmpl +++ b/dev-tools/packaging/templates/ironbank/Dockerfile.tmpl @@ -46,6 +46,10 @@ COPY LICENSE /licenses/elastic-${ELASTIC_PRODUCT} # Add a dumb init process COPY tinit /tinit RUN chmod +x /tinit +# Help with supporting the override in the ECK entrypoint +# https://github.com/elastic/cloud-on-k8s/blob/272fd0f2b344b1f86f04decb561eceab8a5a3254/pkg/controller/agent/pod.go#L455 +# TODO: eventually /tinit will be replaced by /usr/bin/tini +RUN ln -s /tinit /usr/bin/tini # Bring in product from the initial stage. COPY --from=prep_files --chown=1000:0 /usr/share/${ELASTIC_PRODUCT} /usr/share/${ELASTIC_PRODUCT} @@ -84,6 +88,7 @@ RUN chmod 755 /usr/local/bin/docker-entrypoint USER ${ELASTIC_PRODUCT} ENV ELASTIC_PRODUCT=${ELASTIC_PRODUCT} +# TODO: eventually /tinit will be replaced by /usr/bin/tini ENTRYPOINT ["/tinit", "--", "/usr/local/bin/docker-entrypoint"] CMD [""] diff --git a/docs/test-framework-dev-guide.md b/docs/test-framework-dev-guide.md index 3e7ef9383b8..9e38e093103 100644 --- a/docs/test-framework-dev-guide.md +++ b/docs/test-framework-dev-guide.md @@ -67,6 +67,16 @@ between, and it can be very specific or not very specific. > **_NOTE:_** This only filters down the tests based on the platform. It will not execute a tests on a platform unless > the test defines as supporting it. +#### Selecting specific group + +By default, the runner will run all test groups. Each group runs on a dedicated machine instance. 
When working on groups of tests it's better to limit to a specific +group of tests instead of running all tests. This can be done by using the `TEST_GROUPS="default upgrade-standalone"` +environment variable. This variable can take multiple groups with a space between. + +- `TEST_GROUPS="default" mage integration:test` to execute only tests in the "default" group. +- `TEST_GROUPS="default upgrade-standalone" mage integration:test` to execute only tests in the "default" or +"upgrade-standalone" group. + #### Passing additional go test flags When running the tests we can pass additional go test flag using the env variable `GOTEST_FLAGS`. @@ -168,6 +178,17 @@ the `github.com/elastic/elastic-agent/pkg/testing/define` package for the test framework's API and the `github.com/elastic/elastic-agent/pkg/testing/tools` package for helper utilities. +### Test group + +Every `define.Require` must define a `Group` that it belongs to. Each group is executed on a separate instance with all tests within the same group executed +on the same instance. Placing similar tests in the same group allows those tests to run on their own instance +as well as provides a way for a developer to select a specific group of tests with `TEST_GROUPS="{group-name}"`. + +Grouping tests is another way of spreading out the testing load across multiple instances. The more groups that +are defined the more instances will be provisioned to complete all tests. A balance between a small good set of +groups is better than a ton of groups each executing a small set of tests, as the time to set up an instance can +outweigh the benefits of creating another group. + ### Test namespaces Every test has access to its own unique namespace (a string value).
This namespace can diff --git a/elastic-agent.docker.yml b/elastic-agent.docker.yml index b9353c99839..b9ad5e3f156 100644 --- a/elastic-agent.docker.yml +++ b/elastic-agent.docker.yml @@ -7,6 +7,7 @@ outputs: hosts: '${ELASTICSEARCH_HOSTS:http://elasticsearch:9200}' username: '${ELASTICSEARCH_USERNAME:elastic}' password: '${ELASTICSEARCH_PASSWORD:changeme}' + preset: balanced inputs: - type: system/metrics diff --git a/elastic-agent.reference.yml b/elastic-agent.reference.yml index 5f65481c4fd..db84c2e062e 100644 --- a/elastic-agent.reference.yml +++ b/elastic-agent.reference.yml @@ -15,6 +15,11 @@ outputs: # username: "elastic" # password: "changeme" + # Performance preset for elasticsearch outputs. One of "balanced", "throughput", + # "scale", "latency" and "custom". + # The default if unspecified is "custom". + preset: balanced + inputs: - type: system/metrics # Each input must have a unique ID. diff --git a/elastic-agent.yml b/elastic-agent.yml index 540b7a9a59e..0434c57ccb8 100644 --- a/elastic-agent.yml +++ b/elastic-agent.yml @@ -14,6 +14,7 @@ outputs: api_key: "example-key" #username: "elastic" #password: "changeme" + preset: balanced diff --git a/go.mod b/go.mod index 10f09aa94d8..a7f3412ffa0 100644 --- a/go.mod +++ b/go.mod @@ -15,11 +15,11 @@ require ( github.com/elastic/elastic-agent-autodiscover v0.6.5 github.com/elastic/elastic-agent-client/v7 v7.5.0 github.com/elastic/elastic-agent-libs v0.7.2 - github.com/elastic/elastic-agent-system-metrics v0.8.1 + github.com/elastic/elastic-agent-system-metrics v0.9.1 github.com/elastic/elastic-transport-go/v8 v8.3.0 github.com/elastic/go-elasticsearch/v8 v8.11.0 github.com/elastic/go-licenser v0.4.1 - github.com/elastic/go-sysinfo v1.11.1 + github.com/elastic/go-sysinfo v1.11.2 github.com/elastic/go-ucfg v0.8.6 github.com/fatih/color v1.15.0 github.com/fsnotify/fsnotify v1.6.0 diff --git a/go.sum b/go.sum index ed105c44103..feea72b6c35 100644 --- a/go.sum +++ b/go.sum @@ -783,8 +783,8 @@ 
github.com/elastic/elastic-agent-client/v7 v7.5.0 h1:niI3WQ+01Lnp2r5LxK8SyNhrPJe github.com/elastic/elastic-agent-client/v7 v7.5.0/go.mod h1:DYoX95xjC4BW/p2avyu724Qr2+hoUIz9eCU9CVS1d+0= github.com/elastic/elastic-agent-libs v0.7.2 h1:yT0hF0UAxJCdQqhHh6SFpgYrcpB10oFzPj8IaytPS2o= github.com/elastic/elastic-agent-libs v0.7.2/go.mod h1:pVBEElQJUO9mr4WStWNXuQGsJn54lcjAoYAHmsvBLBc= -github.com/elastic/elastic-agent-system-metrics v0.8.1 h1:eg6actuLeGJlIJFotHRdlAsz/3WhX2G8E0qI301IKBA= -github.com/elastic/elastic-agent-system-metrics v0.8.1/go.mod h1:9C1UEfj0P687HAzZepHszN6zXA+2tN2Lx3Osvq1zby8= +github.com/elastic/elastic-agent-system-metrics v0.9.1 h1:r0ofKHgPpl+W09ie7tzGcCDC0d4NZbQUv37rSgHf4FM= +github.com/elastic/elastic-agent-system-metrics v0.9.1/go.mod h1:9C1UEfj0P687HAzZepHszN6zXA+2tN2Lx3Osvq1zby8= github.com/elastic/elastic-integration-corpus-generator-tool v0.5.0/go.mod h1:uf9N86y+UACGybdEhZLpwZ93XHWVhsYZAA4c2T2v6YM= github.com/elastic/elastic-package v0.77.0/go.mod h1:Xeqx0OOVnKBfFoSHsHmKI74RxgRGiDhU6yXEu8BkJJM= github.com/elastic/elastic-transport-go/v8 v8.3.0 h1:DJGxovyQLXGr62e9nDMPSxRyWION0Bh6d9eCFBriiHo= @@ -801,8 +801,8 @@ github.com/elastic/go-structform v0.0.10 h1:oy08o/Ih2hHTkNcRY/1HhaYvIp5z6t8si8gn github.com/elastic/go-structform v0.0.10/go.mod h1:CZWf9aIRYY5SuKSmOhtXScE5uQiLZNqAFnwKR4OrIM4= github.com/elastic/go-sysinfo v1.1.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= github.com/elastic/go-sysinfo v1.7.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= -github.com/elastic/go-sysinfo v1.11.1 h1:g9mwl05njS4r69TisC+vwHWTSKywZFYYUu3so3T/Lao= -github.com/elastic/go-sysinfo v1.11.1/go.mod h1:6KQb31j0QeWBDF88jIdWSxE8cwoOB9tO4Y4osN7Q70E= +github.com/elastic/go-sysinfo v1.11.2 h1:mcm4OSYVMyws6+n2HIVMGkln5HOpo5Ie1ZmbbNn0jg4= +github.com/elastic/go-sysinfo v1.11.2/go.mod h1:GKqR8bbMK/1ITnez9NIsIfXQr25aLhRJa7AfT8HpBFQ= github.com/elastic/go-ucfg v0.8.6 h1:stUeyh2goTgGX+/wb9gzKvTv0YB0231LTpKUgCKj4U0= github.com/elastic/go-ucfg v0.8.6/go.mod 
h1:4E8mPOLSUV9hQ7sgLEJ4bvt0KhMuDJa8joDT2QGAEKA= github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= diff --git a/internal/pkg/agent/application/coordinator/diagnostics_test.go b/internal/pkg/agent/application/coordinator/diagnostics_test.go index 1e12dd38141..2ccc239fe34 100644 --- a/internal/pkg/agent/application/coordinator/diagnostics_test.go +++ b/internal/pkg/agent/application/coordinator/diagnostics_test.go @@ -383,12 +383,10 @@ func TestDiagnosticComponentsActual(t *testing.T) { }, } - // The error values here shouldn't really be empty, this is a known bug, see - // https://github.com/elastic/elastic-agent/issues/2940 expected := ` components: - id: component-1 - error: {} + error: "component error" input_type: "test-input" output_type: "test-output" units: @@ -443,6 +441,7 @@ func TestDiagnosticState(t *testing.T) { DownloadPercent: 0.17469, ScheduledAt: &now, DownloadRate: 123.56, + RetryUntil: &now, }, }, } @@ -472,7 +471,8 @@ upgrade_details: download_percent: 0.17469 scheduled_at: %s download_rate: 123.56 -`, now.Format(time.RFC3339Nano)) + retry_until: %s +`, now.Format(time.RFC3339Nano), now.Format(time.RFC3339Nano)) coord := &Coordinator{ // This test needs a broadcaster since the components-actual diagnostic diff --git a/internal/pkg/agent/application/paths/common.go b/internal/pkg/agent/application/paths/common.go index a02467abe0f..c36dbef8ae5 100644 --- a/internal/pkg/agent/application/paths/common.go +++ b/internal/pkg/agent/application/paths/common.go @@ -59,7 +59,7 @@ func init() { fs.StringVar(&configPath, "path.config", configPath, "Config path is the directory Agent looks for its config file") fs.StringVar(&configFilePath, "c", DefaultConfigName, "Configuration file, relative to path.config") fs.StringVar(&logsPath, "path.logs", logsPath, "Logs path contains Agent log output") - fs.StringVar(&installPath, "path.install", installPath, "Install path contains binaries Agent extracts") + 
fs.StringVar(&installPath, "path.install", installPath, "DEPRECATED, setting this flag has no effect since v8.6.0") // enable user to download update artifacts to alternative place // TODO: remove path.downloads support on next major (this can be configured using `agent.download.targetDirectory`) diff --git a/internal/pkg/agent/application/upgrade/artifact/artifact.go b/internal/pkg/agent/application/upgrade/artifact/artifact.go index c0e8c84a9d8..09b73785dbd 100644 --- a/internal/pkg/agent/application/upgrade/artifact/artifact.go +++ b/internal/pkg/agent/application/upgrade/artifact/artifact.go @@ -9,6 +9,7 @@ import ( "path/filepath" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" + agtversion "github.com/elastic/elastic-agent/pkg/version" ) var packageArchMap = map[string]string{ @@ -31,18 +32,18 @@ type Artifact struct { } // GetArtifactName constructs a path to a downloaded artifact -func GetArtifactName(a Artifact, version, operatingSystem, arch string) (string, error) { +func GetArtifactName(a Artifact, version agtversion.ParsedSemVer, operatingSystem, arch string) (string, error) { key := fmt.Sprintf("%s-binary-%s", operatingSystem, arch) suffix, found := packageArchMap[key] if !found { return "", errors.New(fmt.Sprintf("'%s' is not a valid combination for a package", key), errors.TypeConfig) } - return fmt.Sprintf("%s-%s-%s", a.Cmd, version, suffix), nil + return fmt.Sprintf("%s-%s-%s", a.Cmd, version.String(), suffix), nil } // GetArtifactPath returns a full path of artifact for a program in specific version -func GetArtifactPath(a Artifact, version, operatingSystem, arch, targetDir string) (string, error) { +func GetArtifactPath(a Artifact, version agtversion.ParsedSemVer, operatingSystem, arch, targetDir string) (string, error) { artifactName, err := GetArtifactName(a, version, operatingSystem, arch) if err != nil { return "", err diff --git a/internal/pkg/agent/application/upgrade/artifact/download/composed/downloader.go 
b/internal/pkg/agent/application/upgrade/artifact/download/composed/downloader.go index b5de15fc9a8..476d5790b63 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/composed/downloader.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/composed/downloader.go @@ -13,6 +13,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" + "github.com/elastic/elastic-agent/pkg/version" ) // Downloader is a downloader with a predefined set of downloaders. @@ -35,7 +36,7 @@ func NewDownloader(downloaders ...download.Downloader) *Downloader { // Download fetches the package from configured source. // Returns absolute path to downloaded package and an error. -func (e *Downloader) Download(ctx context.Context, a artifact.Artifact, version string) (string, error) { +func (e *Downloader) Download(ctx context.Context, a artifact.Artifact, version *version.ParsedSemVer) (string, error) { var err error span, ctx := apm.StartSpan(ctx, "download", "app.internal") defer span.End() diff --git a/internal/pkg/agent/application/upgrade/artifact/download/composed/downloader_test.go b/internal/pkg/agent/application/upgrade/artifact/download/composed/downloader_test.go index c9820822d6f..26803adeb0b 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/composed/downloader_test.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/composed/downloader_test.go @@ -9,8 +9,11 @@ import ( "errors" "testing" + "github.com/stretchr/testify/require" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download" + agtversion "github.com/elastic/elastic-agent/pkg/version" "github.com/stretchr/testify/assert" ) @@ -23,7 +26,7 @@ type 
FailingDownloader struct { called bool } -func (d *FailingDownloader) Download(ctx context.Context, _ artifact.Artifact, _ string) (string, error) { +func (d *FailingDownloader) Download(context.Context, artifact.Artifact, *agtversion.ParsedSemVer) (string, error) { d.called = true return "", errors.New("failing") } @@ -34,7 +37,7 @@ type SuccDownloader struct { called bool } -func (d *SuccDownloader) Download(ctx context.Context, _ artifact.Artifact, _ string) (string, error) { +func (d *SuccDownloader) Download(context.Context, artifact.Artifact, *agtversion.ParsedSemVer) (string, error) { d.called = true return succ, nil } @@ -61,9 +64,11 @@ func TestComposed(t *testing.T) { }, } + parseVersion, err := agtversion.ParseVersion("1.2.3") + require.NoError(t, err) for _, tc := range testCases { d := NewDownloader(tc.downloaders[0], tc.downloaders[1]) - r, _ := d.Download(context.TODO(), artifact.Artifact{Name: "a"}, "b") + r, _ := d.Download(context.TODO(), artifact.Artifact{Name: "a"}, parseVersion) assert.Equal(t, tc.expectedResult, r == succ) diff --git a/internal/pkg/agent/application/upgrade/artifact/download/composed/verifier.go b/internal/pkg/agent/application/upgrade/artifact/download/composed/verifier.go index c0b9cce26e9..bfb305c8cf0 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/composed/verifier.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/composed/verifier.go @@ -11,6 +11,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/pkg/core/logger" + agtversion "github.com/elastic/elastic-agent/pkg/version" ) // Verifier is a verifier with a predefined set of verifiers. @@ -38,7 +39,7 @@ func NewVerifier(log *logger.Logger, verifiers ...download.Verifier) *Verifier { } // Verify checks the package from configured source. 
-func (v *Verifier) Verify(a artifact.Artifact, version string, skipDefaultPgp bool, pgpBytes ...string) error { +func (v *Verifier) Verify(a artifact.Artifact, version agtversion.ParsedSemVer, skipDefaultPgp bool, pgpBytes ...string) error { var err error for _, verifier := range v.vv { diff --git a/internal/pkg/agent/application/upgrade/artifact/download/composed/verifier_test.go b/internal/pkg/agent/application/upgrade/artifact/download/composed/verifier_test.go index 088c1a29b6d..d71129db785 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/composed/verifier_test.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/composed/verifier_test.go @@ -11,6 +11,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download" "github.com/elastic/elastic-agent/pkg/core/logger" + agtversion "github.com/elastic/elastic-agent/pkg/version" "github.com/stretchr/testify/assert" ) @@ -23,7 +24,7 @@ func (d *ErrorVerifier) Name() string { return "error" } -func (d *ErrorVerifier) Verify(a artifact.Artifact, version string, _ bool, _ ...string) error { +func (d *ErrorVerifier) Verify(artifact.Artifact, agtversion.ParsedSemVer, bool, ...string) error { d.called = true return errors.New("failing") } @@ -38,7 +39,7 @@ func (d *FailVerifier) Name() string { return "fail" } -func (d *FailVerifier) Verify(a artifact.Artifact, version string, _ bool, _ ...string) error { +func (d *FailVerifier) Verify(artifact.Artifact, agtversion.ParsedSemVer, bool, ...string) error { d.called = true return &download.InvalidSignatureError{File: "", Err: errors.New("invalid signature")} } @@ -53,7 +54,7 @@ func (d *SuccVerifier) Name() string { return "succ" } -func (d *SuccVerifier) Verify(a artifact.Artifact, version string, _ bool, _ ...string) error { +func (d *SuccVerifier) Verify(artifact.Artifact, agtversion.ParsedSemVer, bool, ...string) 
error { d.called = true return nil } @@ -86,9 +87,10 @@ func TestVerifier(t *testing.T) { }, } + testVersion := agtversion.NewParsedSemVer(1, 2, 3, "", "") for _, tc := range testCases { d := NewVerifier(log, tc.verifiers[0], tc.verifiers[1], tc.verifiers[2]) - err := d.Verify(artifact.Artifact{Name: "a", Cmd: "a", Artifact: "a/a"}, "b", false) + err := d.Verify(artifact.Artifact{Name: "a", Cmd: "a", Artifact: "a/a"}, *testVersion, false) assert.Equal(t, tc.expectedResult, err == nil) diff --git a/internal/pkg/agent/application/upgrade/artifact/download/downloader.go b/internal/pkg/agent/application/upgrade/artifact/download/downloader.go index 19e102ab3c9..db32b7bfe97 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/downloader.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/downloader.go @@ -8,9 +8,10 @@ import ( "context" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" + "github.com/elastic/elastic-agent/pkg/version" ) // Downloader is an interface allowing download of an artifact type Downloader interface { - Download(ctx context.Context, a artifact.Artifact, version string) (string, error) + Download(ctx context.Context, a artifact.Artifact, version *version.ParsedSemVer) (string, error) } diff --git a/internal/pkg/agent/application/upgrade/artifact/download/fs/downloader.go b/internal/pkg/agent/application/upgrade/artifact/download/fs/downloader.go index 6de72f0143e..a95f04ba4c3 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/fs/downloader.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/fs/downloader.go @@ -16,6 +16,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" + agtversion "github.com/elastic/elastic-agent/pkg/version" ) const ( @@ -38,7 +39,7 @@ func 
NewDownloader(config *artifact.Config) *Downloader { // Download fetches the package from configured source. // Returns absolute path to downloaded package and an error. -func (e *Downloader) Download(ctx context.Context, a artifact.Artifact, version string) (_ string, err error) { +func (e *Downloader) Download(ctx context.Context, a artifact.Artifact, version *agtversion.ParsedSemVer) (_ string, err error) { span, ctx := apm.StartSpan(ctx, "download", "app.internal") defer span.End() downloadedFiles := make([]string, 0, 2) @@ -52,20 +53,20 @@ func (e *Downloader) Download(ctx context.Context, a artifact.Artifact, version }() // download from source to dest - path, err := e.download(e.config.OS(), a, version, "") + path, err := e.download(e.config.OS(), a, *version, "") downloadedFiles = append(downloadedFiles, path) if err != nil { return "", err } - hashPath, err := e.download(e.config.OS(), a, version, ".sha512") + hashPath, err := e.download(e.config.OS(), a, *version, ".sha512") downloadedFiles = append(downloadedFiles, hashPath) return path, err } // DownloadAsc downloads the package .asc file from configured source. // It returns absolute path to the downloaded file and a no-nil error if any occurs. 
-func (e *Downloader) DownloadAsc(_ context.Context, a artifact.Artifact, version string) (string, error) { +func (e *Downloader) DownloadAsc(_ context.Context, a artifact.Artifact, version agtversion.ParsedSemVer) (string, error) { path, err := e.download(e.config.OS(), a, version, ".asc") if err != nil { os.Remove(path) @@ -78,7 +79,7 @@ func (e *Downloader) DownloadAsc(_ context.Context, a artifact.Artifact, version func (e *Downloader) download( operatingSystem string, a artifact.Artifact, - version, + version agtversion.ParsedSemVer, extension string) (string, error) { filename, err := artifact.GetArtifactName(a, version, operatingSystem, e.config.Arch()) if err != nil { diff --git a/internal/pkg/agent/application/upgrade/artifact/download/fs/downloader_test.go b/internal/pkg/agent/application/upgrade/artifact/download/fs/downloader_test.go new file mode 100644 index 00000000000..0010ad33d5a --- /dev/null +++ b/internal/pkg/agent/application/upgrade/artifact/download/fs/downloader_test.go @@ -0,0 +1,293 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package fs + +import ( + "context" + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" + agtversion "github.com/elastic/elastic-agent/pkg/version" +) + +type file struct { + Name string + Body []byte +} + +func TestDownloader_Download(t *testing.T) { + type fields struct { + config *artifact.Config + } + type args struct { + a artifact.Artifact + version *agtversion.ParsedSemVer + } + tests := []struct { + name string + files []file + fields fields + args args + want string + wantErr assert.ErrorAssertionFunc + }{ + { + name: "happy path released version", + files: []file{ + { + "elastic-agent-1.2.3-linux-x86_64.tar.gz", + []byte("This is a fake linux elastic agent archive"), + }, + { + "elastic-agent-1.2.3-linux-x86_64.tar.gz.sha512", + []byte("somesha512 elastic-agent-1.2.3-linux-x86_64.tar.gz"), + }, + }, + fields: fields{ + config: &artifact.Config{ + OperatingSystem: "linux", + Architecture: "64", + }, + }, + args: args{a: agentSpec, version: agtversion.NewParsedSemVer(1, 2, 3, "", "")}, + want: "elastic-agent-1.2.3-linux-x86_64.tar.gz", + wantErr: assert.NoError, + }, + { + name: "no hash released version", + files: []file{ + { + "elastic-agent-1.2.3-linux-x86_64.tar.gz", + []byte("This is a fake linux elastic agent archive"), + }, + }, + fields: fields{ + config: &artifact.Config{ + OperatingSystem: "linux", + Architecture: "64", + }, + }, + args: args{a: agentSpec, version: agtversion.NewParsedSemVer(1, 2, 3, "", "")}, + want: "elastic-agent-1.2.3-linux-x86_64.tar.gz", + wantErr: assert.Error, + }, + { + name: "happy path snapshot version", + files: []file{ + { + "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64.tar.gz", + []byte("This is a fake linux elastic agent archive"), + }, + { + "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64.tar.gz.sha512", + []byte("somesha512 
elastic-agent-1.2.3-SNAPSHOT-linux-x86_64.tar.gz"), + }, + }, + fields: fields{ + config: &artifact.Config{ + OperatingSystem: "linux", + Architecture: "64", + }, + }, + args: args{a: agentSpec, version: agtversion.NewParsedSemVer(1, 2, 3, "SNAPSHOT", "")}, + want: "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64.tar.gz", + wantErr: assert.NoError, + }, + { + name: "happy path released version with build metadata", + files: []file{ + { + "elastic-agent-1.2.3+build19700101-linux-x86_64.tar.gz", + []byte("This is a fake linux elastic agent archive"), + }, + { + "elastic-agent-1.2.3+build19700101-linux-x86_64.tar.gz.sha512", + []byte("somesha512 elastic-agent-1.2.3+build19700101-linux-x86_64.tar.gz"), + }, + }, + fields: fields{ + config: &artifact.Config{ + OperatingSystem: "linux", + Architecture: "64", + }, + }, + args: args{a: agentSpec, version: agtversion.NewParsedSemVer(1, 2, 3, "", "build19700101")}, + want: "elastic-agent-1.2.3+build19700101-linux-x86_64.tar.gz", + wantErr: assert.NoError, + }, + { + name: "happy path snapshot version with build metadata", + files: []file{ + { + "elastic-agent-1.2.3-SNAPSHOT+build19700101-linux-x86_64.tar.gz", + []byte("This is a fake linux elastic agent archive"), + }, + { + "elastic-agent-1.2.3-SNAPSHOT+build19700101-linux-x86_64.tar.gz.sha512", + []byte("somesha512 elastic-agent-1.2.3-SNAPSHOT+build19700101-linux-x86_64.tar.gz"), + }, + }, + fields: fields{ + config: &artifact.Config{ + OperatingSystem: "linux", + Architecture: "64", + }, + }, + args: args{a: agentSpec, version: agtversion.NewParsedSemVer(1, 2, 3, "SNAPSHOT", "build19700101")}, + want: "elastic-agent-1.2.3-SNAPSHOT+build19700101-linux-x86_64.tar.gz", + wantErr: assert.NoError, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + dropPath := t.TempDir() + targetDirPath := t.TempDir() + + createFiles(t, dropPath, tt.files) + + config := tt.fields.config + config.DropPath = dropPath + config.TargetDirectory = targetDirPath + + e := 
&Downloader{ + dropPath: dropPath, + config: config, + } + got, err := e.Download(context.TODO(), tt.args.a, tt.args.version) + if !tt.wantErr(t, err, fmt.Sprintf("Download(%v, %v)", tt.args.a, tt.args.version)) { + return + } + assert.Equalf(t, filepath.Join(targetDirPath, tt.want), got, "Download(%v, %v)", tt.args.a, tt.args.version) + }) + } +} + +func createFiles(t *testing.T, dstPath string, files []file) { + for _, f := range files { + dstFile := filepath.Join(dstPath, f.Name) + err := os.WriteFile(dstFile, f.Body, 0o666) + require.NoErrorf(t, err, "error preparing file %s: %v", dstFile, err) + } +} + +func TestDownloader_DownloadAsc(t *testing.T) { + type fields struct { + config *artifact.Config + } + type args struct { + a artifact.Artifact + version agtversion.ParsedSemVer + } + tests := []struct { + name string + files []file + fields fields + args args + want string + wantErr assert.ErrorAssertionFunc + }{ + { + name: "happy path released version", + files: []file{ + { + "elastic-agent-1.2.3-linux-x86_64.tar.gz.asc", + []byte("fake signature for elastic-agent package"), + }, + }, + fields: fields{ + config: &artifact.Config{ + OperatingSystem: "linux", + Architecture: "64", + }, + }, + args: args{a: agentSpec, version: *agtversion.NewParsedSemVer(1, 2, 3, "", "")}, + want: "elastic-agent-1.2.3-linux-x86_64.tar.gz.asc", + wantErr: assert.NoError, + }, + { + name: "happy path snapshot version", + files: []file{ + { + "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64.tar.gz.asc", + []byte("fake signature for elastic-agent package"), + }, + }, + fields: fields{ + config: &artifact.Config{ + OperatingSystem: "linux", + Architecture: "64", + }, + }, + args: args{a: agentSpec, version: *agtversion.NewParsedSemVer(1, 2, 3, "SNAPSHOT", "")}, + want: "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64.tar.gz.asc", + wantErr: assert.NoError, + }, + { + name: "happy path released version with build metadata", + files: []file{ + { + 
"elastic-agent-1.2.3+build19700101-linux-x86_64.tar.gz.asc", + []byte("fake signature for elastic-agent package"), + }, + }, + fields: fields{ + config: &artifact.Config{ + OperatingSystem: "linux", + Architecture: "64", + }, + }, + args: args{a: agentSpec, version: *agtversion.NewParsedSemVer(1, 2, 3, "", "build19700101")}, + want: "elastic-agent-1.2.3+build19700101-linux-x86_64.tar.gz.asc", + wantErr: assert.NoError, + }, + { + name: "happy path snapshot version with build metadata", + files: []file{ + { + "elastic-agent-1.2.3-SNAPSHOT+build19700101-linux-x86_64.tar.gz.asc", + []byte("fake signature for elastic-agent package"), + }, + }, + fields: fields{ + config: &artifact.Config{ + OperatingSystem: "linux", + Architecture: "64", + }, + }, + args: args{a: agentSpec, version: *agtversion.NewParsedSemVer(1, 2, 3, "SNAPSHOT", "build19700101")}, + want: "elastic-agent-1.2.3-SNAPSHOT+build19700101-linux-x86_64.tar.gz.asc", + wantErr: assert.NoError, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + dropPath := t.TempDir() + targetDirPath := t.TempDir() + + createFiles(t, dropPath, tt.files) + + config := tt.fields.config + config.DropPath = dropPath + config.TargetDirectory = targetDirPath + + e := &Downloader{ + dropPath: dropPath, + config: config, + } + got, err := e.DownloadAsc(context.TODO(), tt.args.a, tt.args.version) + if !tt.wantErr(t, err, fmt.Sprintf("DownloadAsc(%v, %v)", tt.args.a, tt.args.version)) { + return + } + assert.Equalf(t, filepath.Join(targetDirPath, tt.want), got, "DownloadAsc(%v, %v)", tt.args.a, tt.args.version) + }) + } +} diff --git a/internal/pkg/agent/application/upgrade/artifact/download/fs/verifier.go b/internal/pkg/agent/application/upgrade/artifact/download/fs/verifier.go index 8c7861e1c75..6576143198f 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/fs/verifier.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/fs/verifier.go @@ -15,6 +15,7 @@ import ( 
"github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/pkg/core/logger" + agtversion "github.com/elastic/elastic-agent/pkg/version" ) const ( @@ -64,7 +65,7 @@ func NewVerifier(log *logger.Logger, config *artifact.Config, pgp []byte) (*Veri // Verify checks downloaded package on preconfigured // location against a key stored on elastic.co website. -func (v *Verifier) Verify(a artifact.Artifact, version string, skipDefaultPgp bool, pgpBytes ...string) error { +func (v *Verifier) Verify(a artifact.Artifact, version agtversion.ParsedSemVer, skipDefaultPgp bool, pgpBytes ...string) error { filename, err := artifact.GetArtifactName(a, version, v.config.OS(), v.config.Arch()) if err != nil { return fmt.Errorf("could not get artifact name: %w", err) diff --git a/internal/pkg/agent/application/upgrade/artifact/download/fs/verifier_test.go b/internal/pkg/agent/application/upgrade/artifact/download/fs/verifier_test.go index 4bd605142f3..280a4c374b3 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/fs/verifier_test.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/fs/verifier_test.go @@ -22,15 +22,14 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download" "github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/pkg/core/logger" + agtversion "github.com/elastic/elastic-agent/pkg/version" "github.com/elastic/elastic-agent/testing/pgptest" ) -const ( - version = "7.5.1" -) +var testVersion = agtversion.NewParsedSemVer(7, 5, 1, "", "") var ( - beatSpec = artifact.Artifact{ + agentSpec = artifact.Artifact{ Name: "Elastic Agent", Cmd: "elastic-agent", Artifact: "beat/elastic-agent"} @@ -48,7 +47,7 @@ func TestFetchVerify(t *testing.T) { ctx := context.Background() a := artifact.Artifact{ Name: "elastic-agent", Cmd: "elastic-agent", 
Artifact: "beats/elastic-agent"} - version := "8.0.0" + version := agtversion.NewParsedSemVer(8, 0, 0, "", "") filename := "elastic-agent-8.0.0-darwin-x86_64.tar.gz" targetFilePath := filepath.Join(targetPath, filename) @@ -80,7 +79,7 @@ func TestFetchVerify(t *testing.T) { // first download verify should fail: // download skipped, as invalid package is prepared upfront // verify fails and cleans download - err = verifier.Verify(a, version, false) + err = verifier.Verify(a, *version, false) var checksumErr *download.ChecksumMismatchError require.ErrorAs(t, err, &checksumErr) @@ -109,7 +108,7 @@ func TestFetchVerify(t *testing.T) { _, err = os.Stat(ascTargetFilePath) require.NoError(t, err) - err = verifier.Verify(a, version, false) + err = verifier.Verify(a, *version, false) require.NoError(t, err) // Bad GPG public key. @@ -126,7 +125,7 @@ func TestFetchVerify(t *testing.T) { // Missing .asc file. { - err = verifier.Verify(a, version, false) + err = verifier.Verify(a, *version, false) require.Error(t, err) // Don't delete these files when GPG validation failure. 
@@ -139,7 +138,7 @@ func TestFetchVerify(t *testing.T) { err = os.WriteFile(targetFilePath+".asc", []byte("bad sig"), 0o600) require.NoError(t, err) - err = verifier.Verify(a, version, false) + err = verifier.Verify(a, *version, false) var invalidSigErr *download.InvalidSignatureError assert.ErrorAs(t, err, &invalidSigErr) @@ -217,12 +216,12 @@ func TestVerify(t *testing.T) { }, } - pgpKey := prepareTestCase(t, beatSpec, version, config) + pgpKey := prepareTestCase(t, agentSpec, testVersion, config) testClient := NewDownloader(config) - artifactPath, err := testClient.Download(context.Background(), beatSpec, version) + artifactPath, err := testClient.Download(context.Background(), agentSpec, testVersion) require.NoError(t, err, "fs.Downloader could not download artifacts") - _, err = testClient.DownloadAsc(context.Background(), beatSpec, version) + _, err = testClient.DownloadAsc(context.Background(), agentSpec, *testVersion) require.NoError(t, err, "fs.Downloader could not download artifacts .asc file") _, err = os.Stat(artifactPath) @@ -231,7 +230,7 @@ func TestVerify(t *testing.T) { testVerifier, err := NewVerifier(log, config, pgpKey) require.NoError(t, err) - err = testVerifier.Verify(beatSpec, version, false, tc.RemotePGPUris...) + err = testVerifier.Verify(agentSpec, *testVersion, false, tc.RemotePGPUris...) require.NoError(t, err) // log message informing remote PGP was skipped @@ -245,13 +244,9 @@ func TestVerify(t *testing.T) { // its corresponding checksum (.sha512) and signature (.asc) files. // It creates the necessary key to sing the artifact and returns the public key // to verify the signature. 
-func prepareTestCase( - t *testing.T, - a artifact.Artifact, - version string, - cfg *artifact.Config) []byte { +func prepareTestCase(t *testing.T, a artifact.Artifact, version *agtversion.ParsedSemVer, cfg *artifact.Config) []byte { - filename, err := artifact.GetArtifactName(a, version, cfg.OperatingSystem, cfg.Architecture) + filename, err := artifact.GetArtifactName(a, *version, cfg.OperatingSystem, cfg.Architecture) require.NoErrorf(t, err, "could not get artifact name") err = os.MkdirAll(cfg.DropPath, 0777) diff --git a/internal/pkg/agent/application/upgrade/artifact/download/http/common_test.go b/internal/pkg/agent/application/upgrade/artifact/download/http/common_test.go index cfc899420c2..9094723eedb 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/http/common_test.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/http/common_test.go @@ -19,16 +19,17 @@ import ( "github.com/stretchr/testify/assert" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" + agtversion "github.com/elastic/elastic-agent/pkg/version" "github.com/elastic/elastic-agent/testing/pgptest" ) const ( - version = "7.5.1" sourcePattern = "/downloads/beats/filebeat/" source = "http://artifacts.elastic.co/downloads/" ) var ( + version = agtversion.NewParsedSemVer(7, 5, 1, "", "") beatSpec = artifact.Artifact{ Name: "filebeat", Cmd: "filebeat", diff --git a/internal/pkg/agent/application/upgrade/artifact/download/http/downloader.go b/internal/pkg/agent/application/upgrade/artifact/download/http/downloader.go index 50fc6849f21..ffd28a3fc16 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/http/downloader.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/http/downloader.go @@ -23,6 +23,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/details" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/pkg/core/logger" + 
agtversion "github.com/elastic/elastic-agent/pkg/version" ) const ( @@ -93,7 +94,7 @@ func (e *Downloader) Reload(c *artifact.Config) error { // Download fetches the package from configured source. // Returns absolute path to downloaded package and an error. -func (e *Downloader) Download(ctx context.Context, a artifact.Artifact, version string) (_ string, err error) { +func (e *Downloader) Download(ctx context.Context, a artifact.Artifact, version *agtversion.ParsedSemVer) (_ string, err error) { remoteArtifact := a.Artifact downloadedFiles := make([]string, 0, 2) defer func() { @@ -107,13 +108,13 @@ func (e *Downloader) Download(ctx context.Context, a artifact.Artifact, version }() // download from source to dest - path, err := e.download(ctx, remoteArtifact, e.config.OS(), a, version) + path, err := e.download(ctx, remoteArtifact, e.config.OS(), a, *version) downloadedFiles = append(downloadedFiles, path) if err != nil { return "", err } - hashPath, err := e.downloadHash(ctx, remoteArtifact, e.config.OS(), a, version) + hashPath, err := e.downloadHash(ctx, remoteArtifact, e.config.OS(), a, *version) downloadedFiles = append(downloadedFiles, hashPath) return path, err } @@ -135,7 +136,7 @@ func (e *Downloader) composeURI(artifactName, packageName string) (string, error return uri.String(), nil } -func (e *Downloader) download(ctx context.Context, remoteArtifact string, operatingSystem string, a artifact.Artifact, version string) (string, error) { +func (e *Downloader) download(ctx context.Context, remoteArtifact string, operatingSystem string, a artifact.Artifact, version agtversion.ParsedSemVer) (string, error) { filename, err := artifact.GetArtifactName(a, version, operatingSystem, e.config.Arch()) if err != nil { return "", errors.New(err, "generating package name failed") @@ -149,7 +150,7 @@ func (e *Downloader) download(ctx context.Context, remoteArtifact string, operat return e.downloadFile(ctx, remoteArtifact, filename, fullPath) } -func (e *Downloader) 
downloadHash(ctx context.Context, remoteArtifact string, operatingSystem string, a artifact.Artifact, version string) (string, error) { +func (e *Downloader) downloadHash(ctx context.Context, remoteArtifact string, operatingSystem string, a artifact.Artifact, version agtversion.ParsedSemVer) (string, error) { filename, err := artifact.GetArtifactName(a, version, operatingSystem, e.config.Arch()) if err != nil { return "", errors.New(err, "generating package name failed") diff --git a/internal/pkg/agent/application/upgrade/artifact/download/http/downloader_test.go b/internal/pkg/agent/application/upgrade/artifact/download/http/downloader_test.go index 94e3ce856e2..d8c6e2a9304 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/http/downloader_test.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/http/downloader_test.go @@ -5,13 +5,16 @@ package http import ( + "bytes" "context" "fmt" + "io" "io/ioutil" "net" "net/http" "net/http/httptest" "os" + "path/filepath" "regexp" "strconv" "testing" @@ -23,6 +26,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/details" "github.com/elastic/elastic-agent/pkg/core/logger" + agtversion "github.com/elastic/elastic-agent/pkg/version" "github.com/docker/go-units" "github.com/stretchr/testify/assert" @@ -341,3 +345,187 @@ func printLogs(t *testing.T, logs []observer.LoggedEntry) { t.Logf("[%s] %s", entry.Level, entry.Message) } } + +var agentSpec = artifact.Artifact{ + Name: "Elastic Agent", + Cmd: "elastic-agent", + Artifact: "beat/elastic-agent", +} + +type downloadHttpResponse struct { + statusCode int + headers http.Header + Body []byte +} + +func TestDownloadVersion(t *testing.T) { + + type fields struct { + config *artifact.Config + } + type args struct { + a artifact.Artifact + version *agtversion.ParsedSemVer + } + tests := []struct { + name string + files 
map[string]downloadHttpResponse + fields fields + args args + want string + wantErr assert.ErrorAssertionFunc + }{ + { + name: "happy path released version", + files: map[string]downloadHttpResponse{ + "/beat/elastic-agent/elastic-agent-1.2.3-linux-x86_64.tar.gz": { + statusCode: http.StatusOK, + Body: []byte("This is a fake linux elastic agent archive"), + }, + "/beat/elastic-agent/elastic-agent-1.2.3-linux-x86_64.tar.gz.sha512": { + statusCode: http.StatusOK, + Body: []byte("somesha512 elastic-agent-1.2.3-linux-x86_64.tar.gz"), + }, + }, + fields: fields{ + config: &artifact.Config{ + OperatingSystem: "linux", + Architecture: "64", + }, + }, + args: args{a: agentSpec, version: agtversion.NewParsedSemVer(1, 2, 3, "", "")}, + want: "elastic-agent-1.2.3-linux-x86_64.tar.gz", + wantErr: assert.NoError, + }, + { + name: "no hash released version", + files: map[string]downloadHttpResponse{ + "/beat/elastic-agent/elastic-agent-1.2.3-linux-x86_64.tar.gz": { + statusCode: http.StatusOK, + Body: []byte("This is a fake linux elastic agent archive"), + }, + }, + fields: fields{ + config: &artifact.Config{ + OperatingSystem: "linux", + Architecture: "64", + }, + }, + args: args{a: agentSpec, version: agtversion.NewParsedSemVer(1, 2, 3, "", "")}, + want: "elastic-agent-1.2.3-linux-x86_64.tar.gz", + wantErr: assert.Error, + }, + { + name: "happy path snapshot version", + files: map[string]downloadHttpResponse{ + "/beat/elastic-agent/elastic-agent-1.2.3-SNAPSHOT-linux-x86_64.tar.gz": { + statusCode: http.StatusOK, + Body: []byte("This is a fake linux elastic agent archive"), + }, + "/beat/elastic-agent/elastic-agent-1.2.3-SNAPSHOT-linux-x86_64.tar.gz.sha512": { + statusCode: http.StatusOK, + Body: []byte("somesha512 elastic-agent-1.2.3-SNAPSHOT-linux-x86_64.tar.gz"), + }, + }, + fields: fields{ + config: &artifact.Config{ + OperatingSystem: "linux", + Architecture: "64", + }, + }, + args: args{a: agentSpec, version: agtversion.NewParsedSemVer(1, 2, 3, "SNAPSHOT", "")}, + want: 
"elastic-agent-1.2.3-SNAPSHOT-linux-x86_64.tar.gz", + wantErr: assert.NoError, + }, + { + name: "happy path released version with build metadata", + files: map[string]downloadHttpResponse{ + "/beat/elastic-agent/elastic-agent-1.2.3+build19700101-linux-x86_64.tar.gz": { + statusCode: http.StatusOK, + Body: []byte("This is a fake linux elastic agent archive"), + }, + "/beat/elastic-agent/elastic-agent-1.2.3+build19700101-linux-x86_64.tar.gz.sha512": { + statusCode: http.StatusOK, + Body: []byte("somesha512 elastic-agent-1.2.3-SNAPSHOT-linux-x86_64.tar.gz"), + }, + }, + fields: fields{ + config: &artifact.Config{ + OperatingSystem: "linux", + Architecture: "64", + }, + }, + args: args{a: agentSpec, version: agtversion.NewParsedSemVer(1, 2, 3, "", "build19700101")}, + want: "elastic-agent-1.2.3+build19700101-linux-x86_64.tar.gz", + wantErr: assert.NoError, + }, + { + name: "happy path snapshot version with build metadata", + files: map[string]downloadHttpResponse{ + "/beat/elastic-agent/elastic-agent-1.2.3-SNAPSHOT+build19700101-linux-x86_64.tar.gz": { + statusCode: http.StatusOK, + Body: []byte("This is a fake linux elastic agent archive"), + }, + "/beat/elastic-agent/elastic-agent-1.2.3-SNAPSHOT+build19700101-linux-x86_64.tar.gz.sha512": { + statusCode: http.StatusOK, + Body: []byte("somesha512 elastic-agent-1.2.3-SNAPSHOT+build19700101-linux-x86_64.tar.gz"), + }, + }, + fields: fields{ + config: &artifact.Config{ + OperatingSystem: "linux", + Architecture: "64", + }, + }, + args: args{a: agentSpec, version: agtversion.NewParsedSemVer(1, 2, 3, "SNAPSHOT", "build19700101")}, + want: "elastic-agent-1.2.3-SNAPSHOT+build19700101-linux-x86_64.tar.gz", + wantErr: assert.NoError, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + targetDirPath := t.TempDir() + + handleDownload := func(rw http.ResponseWriter, req *http.Request) { + path := req.URL.Path + + resp, ok := tt.files[path] + if !ok { + rw.WriteHeader(http.StatusNotFound) + return + } 
+ + for k, values := range resp.headers { + for _, v := range values { + rw.Header().Set(k, v) + } + } + + rw.WriteHeader(resp.statusCode) + _, err := io.Copy(rw, bytes.NewReader(resp.Body)) + assert.NoError(t, err, "error writing response content") + } + server := httptest.NewServer(http.HandlerFunc(handleDownload)) + defer server.Close() + + elasticClient := server.Client() + log, _ := logger.NewTesting("downloader") + upgradeDetails := details.NewDetails(tt.args.version.String(), details.StateRequested, "") + config := tt.fields.config + config.SourceURI = server.URL + config.TargetDirectory = targetDirPath + downloader := NewDownloaderWithClient(log, config, *elasticClient, upgradeDetails) + + got, err := downloader.Download(context.TODO(), tt.args.a, tt.args.version) + + if !tt.wantErr(t, err, fmt.Sprintf("Download(%v, %v)", tt.args.a, tt.args.version)) { + return + } + + assert.Equalf(t, filepath.Join(targetDirPath, tt.want), got, "Download(%v, %v)", tt.args.a, tt.args.version) + }) + } + +} diff --git a/internal/pkg/agent/application/upgrade/artifact/download/http/verifier.go b/internal/pkg/agent/application/upgrade/artifact/download/http/verifier.go index 50aa64fab1e..5197f931285 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/http/verifier.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/http/verifier.go @@ -16,6 +16,7 @@ import ( "time" "github.com/elastic/elastic-agent-libs/transport/httpcommon" + agtversion "github.com/elastic/elastic-agent/pkg/version" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download" @@ -87,7 +88,7 @@ func (v *Verifier) Reload(c *artifact.Config) error { // Verify checks downloaded package on preconfigured // location against a key stored on elastic.co website. 
-func (v *Verifier) Verify(a artifact.Artifact, version string, skipDefaultPgp bool, pgpBytes ...string) error { +func (v *Verifier) Verify(a artifact.Artifact, version agtversion.ParsedSemVer, skipDefaultPgp bool, pgpBytes ...string) error { artifactPath, err := artifact.GetArtifactPath(a, version, v.config.OS(), v.config.Arch(), v.config.TargetDirectory) if err != nil { return errors.New(err, "retrieving package path") @@ -115,7 +116,7 @@ func (v *Verifier) Verify(a artifact.Artifact, version string, skipDefaultPgp bo return nil } -func (v *Verifier) verifyAsc(a artifact.Artifact, version string, skipDefaultKey bool, pgpSources ...string) error { +func (v *Verifier) verifyAsc(a artifact.Artifact, version agtversion.ParsedSemVer, skipDefaultKey bool, pgpSources ...string) error { filename, err := artifact.GetArtifactName(a, version, v.config.OS(), v.config.Arch()) if err != nil { return errors.New(err, "retrieving package name") diff --git a/internal/pkg/agent/application/upgrade/artifact/download/http/verifier_test.go b/internal/pkg/agent/application/upgrade/artifact/download/http/verifier_test.go index 66c8bd715e0..3d5c74a9a8a 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/http/verifier_test.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/http/verifier_test.go @@ -64,7 +64,7 @@ func TestVerify(t *testing.T) { t.Fatal(err) } - err = testVerifier.Verify(beatSpec, version, false) + err = testVerifier.Verify(beatSpec, *version, false) require.NoError(t, err) }) } diff --git a/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader.go b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader.go index ecf2497851c..5c417531304 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader.go @@ -8,7 +8,9 @@ import ( "context" "encoding/json" "fmt" + gohttp "net/http" "strings" 
+ "time" "github.com/elastic/elastic-agent-libs/transport/httpcommon" @@ -26,6 +28,7 @@ const snapshotURIFormat = "https://snapshots.elastic.co/%s-%s/downloads/" type Downloader struct { downloader download.Downloader versionOverride *agtversion.ParsedSemVer + client *gohttp.Client } // NewDownloader creates a downloader which first checks local directory @@ -34,19 +37,30 @@ type Downloader struct { // artifact.Config struct is part of agent configuration and a version // override makes no sense there func NewDownloader(log *logger.Logger, config *artifact.Config, versionOverride *agtversion.ParsedSemVer, upgradeDetails *details.Details) (download.Downloader, error) { - cfg, err := snapshotConfig(config, versionOverride) + client, err := config.HTTPTransportSettings.Client( + httpcommon.WithAPMHTTPInstrumentation(), + httpcommon.WithKeepaliveSettings{Disable: false, IdleConnTimeout: 30 * time.Second}, + ) if err != nil { - return nil, fmt.Errorf("error creating snapshot config: %w", err) + return nil, err } - httpDownloader, err := http.NewDownloader(log, cfg, upgradeDetails) + return NewDownloaderWithClient(log, config, versionOverride, client, upgradeDetails) +} + +func NewDownloaderWithClient(log *logger.Logger, config *artifact.Config, versionOverride *agtversion.ParsedSemVer, client *gohttp.Client, upgradeDetails *details.Details) (download.Downloader, error) { + // TODO: decide an appropriate timeout for this + cfg, err := snapshotConfig(context.TODO(), client, config, versionOverride) if err != nil { - return nil, fmt.Errorf("failed to create snapshot downloader: %w", err) + return nil, fmt.Errorf("error creating snapshot config: %w", err) } + httpDownloader := http.NewDownloaderWithClient(log, cfg, *client, upgradeDetails) + return &Downloader{ downloader: httpDownloader, versionOverride: versionOverride, + client: client, }, nil } @@ -56,7 +70,8 @@ func (e *Downloader) Reload(c *artifact.Config) error { return nil } - cfg, err := snapshotConfig(c, 
e.versionOverride) + // TODO: decide an appropriate timeout for this + cfg, err := snapshotConfig(context.TODO(), e.client, c, e.versionOverride) if err != nil { return fmt.Errorf("snapshot.downloader: failed to generate snapshot config: %w", err) } @@ -66,12 +81,14 @@ func (e *Downloader) Reload(c *artifact.Config) error { // Download fetches the package from configured source. // Returns absolute path to downloaded package and an error. -func (e *Downloader) Download(ctx context.Context, a artifact.Artifact, version string) (string, error) { - return e.downloader.Download(ctx, a, version) +func (e *Downloader) Download(ctx context.Context, a artifact.Artifact, version *agtversion.ParsedSemVer) (string, error) { + // remove build metadata to match filename of the package for the specific snapshot build + strippedVersion := agtversion.NewParsedSemVer(version.Major(), version.Minor(), version.Patch(), version.Prerelease(), "") + return e.downloader.Download(ctx, a, strippedVersion) } -func snapshotConfig(config *artifact.Config, versionOverride *agtversion.ParsedSemVer) (*artifact.Config, error) { - snapshotURI, err := snapshotURI(versionOverride, config) +func snapshotConfig(ctx context.Context, client *gohttp.Client, config *artifact.Config, versionOverride *agtversion.ParsedSemVer) (*artifact.Config, error) { + snapshotURI, err := snapshotURI(ctx, client, versionOverride, config) if err != nil { return nil, fmt.Errorf("failed to detect remote snapshot repo, proceeding with configured: %w", err) } @@ -88,7 +105,7 @@ func snapshotConfig(config *artifact.Config, versionOverride *agtversion.ParsedS }, nil } -func snapshotURI(versionOverride *agtversion.ParsedSemVer, config *artifact.Config) (string, error) { +func snapshotURI(ctx context.Context, client *gohttp.Client, versionOverride *agtversion.ParsedSemVer, config *artifact.Config) (string, error) { // Respect a non-default source URI even if the version is a snapshot. 
if config.SourceURI != artifact.DefaultSourceURI { return config.SourceURI, nil @@ -107,14 +124,13 @@ func snapshotURI(versionOverride *agtversion.ParsedSemVer, config *artifact.Conf version = versionOverride.CoreVersion() } - // we go through the artifact API to find the location of the latest snapshot build for the specified version - client, err := config.HTTPTransportSettings.Client(httpcommon.WithAPMHTTPInstrumentation()) + artifactsURI := fmt.Sprintf("https://artifacts-api.elastic.co/v1/search/%s-SNAPSHOT/elastic-agent", version) + request, err := gohttp.NewRequestWithContext(ctx, gohttp.MethodGet, artifactsURI, nil) if err != nil { - return "", err + return "", fmt.Errorf("creating request to artifact api: %w", err) } - artifactsURI := fmt.Sprintf("https://artifacts-api.elastic.co/v1/search/%s-SNAPSHOT/elastic-agent", version) - resp, err := client.Get(artifactsURI) + resp, err := client.Do(request) if err != nil { return "", err } diff --git a/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader_test.go b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader_test.go index 18ed58b0d65..d7dc8d433e1 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader_test.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader_test.go @@ -5,23 +5,241 @@ package snapshot import ( + "bytes" + "context" + "fmt" + "io" + "net" + "net/http" + "net/http/httptest" + "path/filepath" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" - "github.com/elastic/elastic-agent/pkg/version" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/details" + "github.com/elastic/elastic-agent/pkg/core/logger" + agtversion "github.com/elastic/elastic-agent/pkg/version" ) func TestNonDefaultSourceURI(t *testing.T) { - version, err := 
version.ParseVersion("8.12.0-SNAPSHOT") + version, err := agtversion.ParseVersion("8.12.0-SNAPSHOT") require.NoError(t, err) config := artifact.Config{ SourceURI: "localhost:1234", } - sourceURI, err := snapshotURI(version, &config) + sourceURI, err := snapshotURI(context.TODO(), http.DefaultClient, version, &config) require.NoError(t, err) require.Equal(t, config.SourceURI, sourceURI) } + +const artifactAPIElasticAgentSearchResponse = ` +{ + "packages": { + "elastic-agent-1.2.3-SNAPSHOT-darwin-aarch64.tar.gz": { + "url": "https://snapshots.elastic.co/1.2.3-33e8d7e1/downloads/beats/elastic-agent/elastic-agent-1.2.3-SNAPSHOT-darwin-aarch64.tar.gz", + "sha_url": "https://snapshots.elastic.co/1.2.3-33e8d7e1/downloads/beats/elastic-agent/elastic-agent-1.2.3-SNAPSHOT-darwin-aarch64.tar.gz.sha512", + "asc_url": "https://snapshots.elastic.co/1.2.3-33e8d7e1/downloads/beats/elastic-agent/elastic-agent-1.2.3-SNAPSHOT-darwin-aarch64.tar.gz.asc", + "type": "tar", + "architecture": "aarch64", + "os": [ + "darwin" + ] + }, + "elastic-agent-1.2.3-SNAPSHOT-windows-x86_64.zip": { + "url": "https://snapshots.elastic.co/1.2.3-33e8d7e1/downloads/beats/elastic-agent/elastic-agent-1.2.3-SNAPSHOT-windows-x86_64.zip", + "sha_url": "https://snapshots.elastic.co/1.2.3-33e8d7e1/downloads/beats/elastic-agent/elastic-agent-1.2.3-SNAPSHOT-windows-x86_64.zip.sha512", + "asc_url": "https://snapshots.elastic.co/1.2.3-33e8d7e1/downloads/beats/elastic-agent/elastic-agent-1.2.3-SNAPSHOT-windows-x86_64.zip.asc", + "type": "zip", + "architecture": "x86_64", + "os": [ + "windows" + ] + }, + "elastic-agent-core-1.2.3-SNAPSHOT-linux-arm64.tar.gz": { + "url": "https://snapshots.elastic.co/1.2.3-33e8d7e1/downloads/elastic-agent-core/elastic-agent-core-1.2.3-SNAPSHOT-linux-arm64.tar.gz", + "sha_url": "https://snapshots.elastic.co/1.2.3-33e8d7e1/downloads/elastic-agent-core/elastic-agent-core-1.2.3-SNAPSHOT-linux-arm64.tar.gz.sha512", + "asc_url": 
"https://snapshots.elastic.co/1.2.3-33e8d7e1/downloads/elastic-agent-core/elastic-agent-core-1.2.3-SNAPSHOT-linux-arm64.tar.gz.asc", + "type": "tar", + "architecture": "arm64", + "os": [ + "linux" + ] + }, + "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64.tar.gz": { + "url": "https://snapshots.elastic.co/1.2.3-33e8d7e1/downloads/beats/elastic-agent/elastic-agent-1.2.3-SNAPSHOT-linux-x86_64.tar.gz", + "sha_url": "https://snapshots.elastic.co/1.2.3-33e8d7e1/downloads/beats/elastic-agent/elastic-agent-1.2.3-SNAPSHOT-linux-x86_64.tar.gz.sha512", + "asc_url": "https://snapshots.elastic.co/1.2.3-33e8d7e1/downloads/beats/elastic-agent/elastic-agent-1.2.3-SNAPSHOT-linux-x86_64.tar.gz.asc", + "type": "tar", + "architecture": "x86_64", + "os": [ + "linux" + ] + }, + "elastic-agent-1.2.3-SNAPSHOT-linux-arm64.tar.gz": { + "url": "https://snapshots.elastic.co/1.2.3-33e8d7e1/downloads/beats/elastic-agent/elastic-agent-1.2.3-SNAPSHOT-linux-arm64.tar.gz", + "sha_url": "https://snapshots.elastic.co/1.2.3-33e8d7e1/downloads/beats/elastic-agent/elastic-agent-1.2.3-SNAPSHOT-linux-arm64.tar.gz.sha512", + "asc_url": "https://snapshots.elastic.co/1.2.3-33e8d7e1/downloads/beats/elastic-agent/elastic-agent-1.2.3-SNAPSHOT-linux-arm64.tar.gz.asc", + "type": "tar", + "architecture": "arm64", + "os": [ + "linux" + ] + }, + "elastic-agent-1.2.3-SNAPSHOT-darwin-x86_64.tar.gz": { + "url": "https://snapshots.elastic.co/1.2.3-33e8d7e1/downloads/beats/elastic-agent/elastic-agent-1.2.3-SNAPSHOT-darwin-x86_64.tar.gz", + "sha_url": "https://snapshots.elastic.co/1.2.3-33e8d7e1/downloads/beats/elastic-agent/elastic-agent-1.2.3-SNAPSHOT-darwin-x86_64.tar.gz.sha512", + "asc_url": "https://snapshots.elastic.co/1.2.3-33e8d7e1/downloads/beats/elastic-agent/elastic-agent-1.2.3-SNAPSHOT-darwin-x86_64.tar.gz.asc", + "type": "tar", + "architecture": "x86_64", + "os": [ + "darwin" + ] + } + }, + "manifests": { + "last-update-time": "Tue, 05 Dec 2023 15:47:06 UTC", + "seconds-since-last-update": 201 + } +} +` + +var 
agentSpec = artifact.Artifact{ + Name: "Elastic Agent", + Cmd: "elastic-agent", + Artifact: "beat/elastic-agent", +} + +type downloadHttpResponse struct { + statusCode int + headers http.Header + Body []byte +} + +func TestDownloadVersion(t *testing.T) { + + type fields struct { + config *artifact.Config + } + type args struct { + a artifact.Artifact + version *agtversion.ParsedSemVer + } + tests := []struct { + name string + files map[string]downloadHttpResponse + fields fields + args args + want string + wantErr assert.ErrorAssertionFunc + }{ + { + name: "happy path snapshot version", + files: map[string]downloadHttpResponse{ + "/1.2.3-33e8d7e1/downloads/beat/elastic-agent/elastic-agent-1.2.3-SNAPSHOT-linux-x86_64.tar.gz": { + statusCode: http.StatusOK, + Body: []byte("This is a fake linux elastic agent archive"), + }, + "/1.2.3-33e8d7e1/downloads/beat/elastic-agent/elastic-agent-1.2.3-SNAPSHOT-linux-x86_64.tar.gz.sha512": { + statusCode: http.StatusOK, + Body: []byte("somesha512 elastic-agent-1.2.3-SNAPSHOT-linux-x86_64.tar.gz"), + }, + "/v1/search/1.2.3-SNAPSHOT/elastic-agent": { + statusCode: http.StatusOK, + headers: map[string][]string{"Content-Type": {"application/json"}}, + Body: []byte(artifactAPIElasticAgentSearchResponse), + }, + }, + fields: fields{ + config: &artifact.Config{ + OperatingSystem: "linux", + Architecture: "64", + }, + }, + args: args{a: agentSpec, version: agtversion.NewParsedSemVer(1, 2, 3, "SNAPSHOT", "")}, + want: "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64.tar.gz", + wantErr: assert.NoError, + }, + { + name: "happy path snapshot version with build metadata", + files: map[string]downloadHttpResponse{ + "/1.2.3-buildid/downloads/beat/elastic-agent/elastic-agent-1.2.3-SNAPSHOT-linux-x86_64.tar.gz": { + statusCode: http.StatusOK, + Body: []byte("This is a fake linux elastic agent archive"), + }, + "/1.2.3-buildid/downloads/beat/elastic-agent/elastic-agent-1.2.3-SNAPSHOT-linux-x86_64.tar.gz.sha512": { + statusCode: http.StatusOK, + Body: 
[]byte("somesha512 elastic-agent-1.2.3-SNAPSHOT-linux-x86_64.tar.gz"), + }, + }, + fields: fields{ + config: &artifact.Config{ + OperatingSystem: "linux", + Architecture: "64", + }, + }, + args: args{a: agentSpec, version: agtversion.NewParsedSemVer(1, 2, 3, "SNAPSHOT", "buildid")}, + want: "elastic-agent-1.2.3-SNAPSHOT-linux-x86_64.tar.gz", + wantErr: assert.NoError, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + targetDirPath := t.TempDir() + + handleDownload := func(rw http.ResponseWriter, req *http.Request) { + path := req.URL.Path + + resp, ok := tt.files[path] + if !ok { + rw.WriteHeader(http.StatusNotFound) + return + } + + for k, values := range resp.headers { + for _, v := range values { + rw.Header().Set(k, v) + } + } + + rw.WriteHeader(resp.statusCode) + _, err := io.Copy(rw, bytes.NewReader(resp.Body)) + assert.NoError(t, err, "error writing out response body") + } + server := httptest.NewTLSServer(http.HandlerFunc(handleDownload)) + defer server.Close() + + log, _ := logger.NewTesting("downloader") + upgradeDetails := details.NewDetails(tt.args.version.String(), details.StateRequested, "") + + config := tt.fields.config + config.TargetDirectory = targetDirPath + config.SourceURI = "https://artifacts.elastic.co/downloads/" + + client := server.Client() + transport := client.Transport.(*http.Transport) + + transport.TLSClientConfig.InsecureSkipVerify = true + transport.DialContext = func(_ context.Context, network, s string) (net.Conn, error) { + _ = s + return net.Dial(network, server.Listener.Addr().String()) + } + downloader, err := NewDownloaderWithClient(log, config, tt.args.version, client, upgradeDetails) + require.NoError(t, err) + got, err := downloader.Download(context.TODO(), tt.args.a, tt.args.version) + + if !tt.wantErr(t, err, fmt.Sprintf("Download(%v, %v)", tt.args.a, tt.args.version)) { + return + } + + assert.Equalf(t, filepath.Join(targetDirPath, tt.want), got, "Download(%v, %v)", tt.args.a, 
tt.args.version) + }) + } + +} diff --git a/internal/pkg/agent/application/upgrade/artifact/download/snapshot/verifier.go b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/verifier.go index 060c5e9fa10..69ed5dfe7f2 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/snapshot/verifier.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/verifier.go @@ -5,6 +5,10 @@ package snapshot import ( + "context" + gohttp "net/http" + + "github.com/elastic/elastic-agent-libs/transport/httpcommon" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/http" @@ -16,6 +20,7 @@ import ( type Verifier struct { verifier download.Verifier versionOverride *agtversion.ParsedSemVer + client *gohttp.Client } func (v *Verifier) Name() string { @@ -25,7 +30,14 @@ func (v *Verifier) Name() string { // NewVerifier creates a downloader which first checks local directory // and then fallbacks to remote if configured. func NewVerifier(log *logger.Logger, config *artifact.Config, pgp []byte, versionOverride *agtversion.ParsedSemVer) (download.Verifier, error) { - cfg, err := snapshotConfig(config, versionOverride) + + client, err := config.HTTPTransportSettings.Client(httpcommon.WithAPMHTTPInstrumentation()) + if err != nil { + return nil, err + } + + // TODO: decide an appropriate timeout for this + cfg, err := snapshotConfig(context.TODO(), client, config, versionOverride) if err != nil { return nil, err } @@ -37,12 +49,14 @@ func NewVerifier(log *logger.Logger, config *artifact.Config, pgp []byte, versio return &Verifier{ verifier: v, versionOverride: versionOverride, + client: client, }, nil } // Verify checks the package from configured source. 
-func (v *Verifier) Verify(a artifact.Artifact, version string, skipDefaultPgp bool, pgpBytes ...string) error { - return v.verifier.Verify(a, version, skipDefaultPgp, pgpBytes...) +func (v *Verifier) Verify(a artifact.Artifact, version agtversion.ParsedSemVer, skipDefaultPgp bool, pgpBytes ...string) error { + strippedVersion := agtversion.NewParsedSemVer(version.Major(), version.Minor(), version.Patch(), version.Prerelease(), "") + return v.verifier.Verify(a, *strippedVersion, skipDefaultPgp, pgpBytes...) } func (v *Verifier) Reload(c *artifact.Config) error { @@ -51,7 +65,8 @@ func (v *Verifier) Reload(c *artifact.Config) error { return nil } - cfg, err := snapshotConfig(c, v.versionOverride) + // TODO: decide an appropriate timeout for this + cfg, err := snapshotConfig(context.TODO(), v.client, c, v.versionOverride) if err != nil { return errors.New(err, "snapshot.downloader: failed to generate snapshot config") } diff --git a/internal/pkg/agent/application/upgrade/artifact/download/verifier.go b/internal/pkg/agent/application/upgrade/artifact/download/verifier.go index 79fc2348711..e466c0119ea 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/verifier.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/verifier.go @@ -25,6 +25,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" + agtversion "github.com/elastic/elastic-agent/pkg/version" ) const ( @@ -83,7 +84,7 @@ type Verifier interface { // If the checksum does no match Verify returns a *download.ChecksumMismatchError. // If the PGP signature check fails then Verify returns a // *download.InvalidSignatureError. 
- Verify(a artifact.Artifact, version string, skipDefaultPgp bool, pgpBytes ...string) error + Verify(a artifact.Artifact, version agtversion.ParsedSemVer, skipDefaultPgp bool, pgpBytes ...string) error } // VerifySHA512HashWithCleanup calls VerifySHA512Hash and, in case of a diff --git a/internal/pkg/agent/application/upgrade/details/details.go b/internal/pkg/agent/application/upgrade/details/details.go index 3522b181e59..275560a4cd3 100644 --- a/internal/pkg/agent/application/upgrade/details/details.go +++ b/internal/pkg/agent/application/upgrade/details/details.go @@ -37,6 +37,14 @@ type Metadata struct { // is progressing. DownloadRate details.DownloadRate `json:"download_rate,omitempty" yaml:"download_rate,omitempty"` + // RetryErrorMsg is any error message that is a result of a retryable upgrade + // step, e.g. the download step, being retried. + RetryErrorMsg string `json:"retry_error_msg,omitempty" yaml:"retry_error_msg,omitempty"` + + // RetryUntil is the deadline until when a retryable upgrade step, e.g. the download + // step, will be retried. + RetryUntil *time.Time `json:"retry_until,omitempty" yaml:"retry_until"` + // FailedState is the state an upgrade was in if/when it failed. Use the // Fail() method of UpgradeDetails to correctly record details when // an upgrade fails. @@ -89,6 +97,28 @@ func (d *Details) SetDownloadProgress(percent, rateBytesPerSecond float64) { d.notifyObservers() } +// SetRetryableError sets the RetryErrorMsg metadata field. +func (d *Details) SetRetryableError(retryableError error) { + d.mu.Lock() + defer d.mu.Unlock() + + if retryableError == nil { + d.Metadata.RetryErrorMsg = "" + } else { + d.Metadata.RetryErrorMsg = retryableError.Error() + } + d.notifyObservers() +} + +// SetRetryUntil sets the RetryUntil metadata field. 
+func (d *Details) SetRetryUntil(retryUntil *time.Time) { + d.mu.Lock() + defer d.mu.Unlock() + + d.Metadata.RetryUntil = retryUntil + d.notifyObservers() +} + // Fail is a convenience method to set the state of the upgrade // to StateFailed, set metadata associated with the failure, and // notify all observers. @@ -163,7 +193,9 @@ func (m Metadata) Equals(otherM Metadata) bool { m.FailedState == otherM.FailedState && m.ErrorMsg == otherM.ErrorMsg && m.DownloadPercent == otherM.DownloadPercent && - m.DownloadRate == otherM.DownloadRate + m.DownloadRate == otherM.DownloadRate && + equalTimePointers(m.RetryUntil, otherM.RetryUntil) && + m.RetryErrorMsg == otherM.RetryErrorMsg } func equalTimePointers(t, otherT *time.Time) bool { diff --git a/internal/pkg/agent/application/upgrade/details/details_test.go b/internal/pkg/agent/application/upgrade/details/details_test.go index d1ade774e7b..10e959252e2 100644 --- a/internal/pkg/agent/application/upgrade/details/details_test.go +++ b/internal/pkg/agent/application/upgrade/details/details_test.go @@ -9,6 +9,7 @@ import ( "errors" "math" "testing" + "time" "github.com/stretchr/testify/require" ) @@ -102,10 +103,16 @@ func TestDetailsDownloadRateJSON(t *testing.T) { func TestEquals(t *testing.T) { details1 := NewDetails("8.12.0", StateDownloading, "foobar") details1.SetDownloadProgress(0.1234, 34.56) + details1.SetRetryableError(errors.New("retryable error")) + retryUntil1 := time.Date(2023, 11, 29, 11, 00, 32, 0, time.UTC) + details1.SetRetryUntil(&retryUntil1) details1.Fail(errors.New("download failed")) details2 := NewDetails("8.12.0", StateDownloading, "foobar") details2.SetDownloadProgress(0.1234, 34.56) + details2.SetRetryableError(errors.New("retryable error")) + retryUntil2 := time.Date(2023, 11, 29, 11, 00, 32, 0, time.UTC) + details2.SetRetryUntil(&retryUntil2) details2.Fail(errors.New("download failed")) details3 := NewDetails("8.12.0", StateDownloading, "foobar") diff --git 
a/internal/pkg/agent/application/upgrade/marker_access_common.go b/internal/pkg/agent/application/upgrade/marker_access_common.go new file mode 100644 index 00000000000..fc069466c2c --- /dev/null +++ b/internal/pkg/agent/application/upgrade/marker_access_common.go @@ -0,0 +1,32 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package upgrade + +import ( + "fmt" + "os" +) + +func writeMarkerFileCommon(markerFile string, markerBytes []byte, shouldFsync bool) error { + f, err := os.OpenFile(markerFile, os.O_WRONLY|os.O_CREATE, 0600) + if err != nil { + return fmt.Errorf("failed to open upgrade marker file for writing: %w", err) + } + defer f.Close() + + if _, err := f.Write(markerBytes); err != nil { + return fmt.Errorf("failed to write upgrade marker file: %w", err) + } + + if !shouldFsync { + return nil + } + + if err := f.Sync(); err != nil { + return fmt.Errorf("failed to sync upgrade marker file to disk: %w", err) + } + + return nil +} diff --git a/internal/pkg/agent/application/upgrade/marker_access_other.go b/internal/pkg/agent/application/upgrade/marker_access_other.go index ed854160e94..fbcfeae5726 100644 --- a/internal/pkg/agent/application/upgrade/marker_access_other.go +++ b/internal/pkg/agent/application/upgrade/marker_access_other.go @@ -25,6 +25,6 @@ func readMarkerFile(markerFile string) ([]byte, error) { // On non-Windows platforms, writeMarkerFile simply writes the marker file. // See marker_access_windows.go for behavior on Windows platforms. 
-func writeMarkerFile(markerFile string, markerBytes []byte) error { - return os.WriteFile(markerFilePath(), markerBytes, 0600) +func writeMarkerFile(markerFile string, markerBytes []byte, shouldFsync bool) error { + return writeMarkerFileCommon(markerFile, markerBytes, shouldFsync) } diff --git a/internal/pkg/agent/application/upgrade/marker_access_test.go b/internal/pkg/agent/application/upgrade/marker_access_test.go new file mode 100644 index 00000000000..3f1ff637eaa --- /dev/null +++ b/internal/pkg/agent/application/upgrade/marker_access_test.go @@ -0,0 +1,26 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package upgrade + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestWriteMarkerFile(t *testing.T) { + tmpDir := t.TempDir() + markerFile := filepath.Join(tmpDir, markerFilename) + + markerBytes := []byte("foo bar") + err := writeMarkerFile(markerFile, markerBytes, true) + require.NoError(t, err) + + data, err := os.ReadFile(markerFile) + require.NoError(t, err) + require.Equal(t, markerBytes, data) +} diff --git a/internal/pkg/agent/application/upgrade/marker_access_windows.go b/internal/pkg/agent/application/upgrade/marker_access_windows.go index 673a57eeabf..cb37f9c0e88 100644 --- a/internal/pkg/agent/application/upgrade/marker_access_windows.go +++ b/internal/pkg/agent/application/upgrade/marker_access_windows.go @@ -49,9 +49,9 @@ func readMarkerFile(markerFile string) ([]byte, error) { // mechanism is necessary since the marker file could be accessed by multiple // processes (the Upgrade Watcher and the main Agent process) at the same time, // which could fail on Windows. 
-func writeMarkerFile(markerFile string, markerBytes []byte) error { +func writeMarkerFile(markerFile string, markerBytes []byte, shouldFsync bool) error { writeFn := func() error { - return os.WriteFile(markerFile, markerBytes, 0600) + return writeMarkerFileCommon(markerFile, markerBytes, shouldFsync) } if err := accessMarkerFileWithRetries(writeFn); err != nil { diff --git a/internal/pkg/agent/application/upgrade/marker_watcher.go b/internal/pkg/agent/application/upgrade/marker_watcher.go index faefc3e64b2..2d16d5a8ee0 100644 --- a/internal/pkg/agent/application/upgrade/marker_watcher.go +++ b/internal/pkg/agent/application/upgrade/marker_watcher.go @@ -8,6 +8,7 @@ import ( "context" "fmt" "path/filepath" + "sync/atomic" "github.com/fsnotify/fsnotify" @@ -19,12 +20,15 @@ import ( type MarkerWatcher interface { Watch() <-chan UpdateMarker Run(ctx context.Context) error + SetUpgradeStarted() } type MarkerFileWatcher struct { markerFilePath string logger *logger.Logger updateCh chan UpdateMarker + + upgradeStarted atomic.Bool } func newMarkerFileWatcher(upgradeMarkerFilePath string, logger *logger.Logger) MarkerWatcher { @@ -41,6 +45,10 @@ func (mfw *MarkerFileWatcher) Watch() <-chan UpdateMarker { return mfw.updateCh } +func (mfw *MarkerFileWatcher) SetUpgradeStarted() { + mfw.upgradeStarted.Store(true) +} + func (mfw *MarkerFileWatcher) Run(ctx context.Context) error { watcher, err := fsnotify.NewWatcher() if err != nil { @@ -91,10 +99,10 @@ func (mfw *MarkerFileWatcher) Run(ctx context.Context) error { case e.Op&(fsnotify.Create|fsnotify.Write) != 0: // Upgrade marker file was created or updated; read its contents // and send them over the update channel. 
- mfw.processMarker(version.GetAgentPackageVersion()) + mfw.processMarker(version.GetAgentPackageVersion(), version.Commit()) } case <-doInitialRead: - mfw.processMarker(version.GetAgentPackageVersion()) + mfw.processMarker(version.GetAgentPackageVersion(), version.Commit()) } } }() @@ -102,7 +110,7 @@ func (mfw *MarkerFileWatcher) Run(ctx context.Context) error { return nil } -func (mfw *MarkerFileWatcher) processMarker(currentVersion string) { +func (mfw *MarkerFileWatcher) processMarker(currentVersion string, commit string) { marker, err := loadMarker(mfw.markerFilePath) if err != nil { mfw.logger.Error(err) @@ -115,12 +123,12 @@ func (mfw *MarkerFileWatcher) processMarker(currentVersion string) { } // If the marker exists but the version of Agent we're running right - // now is the same as the prevVersion recorded in the marker, it means - // the upgrade was rolled back. Ideally, this UPG_ROLLBACK state would've + // now is the same as the prevVersion recorded in the marker AND an upgrade + // has not started, it means the upgrade was rolled back. Ideally, this UPG_ROLLBACK state would've // been recorded in the marker's upgrade details field but, in case it // isn't for some reason, we fallback to explicitly setting that state as // part of the upgrade details in the marker. 
- if marker.PrevVersion == currentVersion { + if marker.PrevVersion == currentVersion && marker.PrevHash == commit && !mfw.upgradeStarted.Load() { if marker.Details == nil { marker.Details = details.NewDetails("unknown", details.StateRollback, marker.GetActionID()) } else { diff --git a/internal/pkg/agent/application/upgrade/marker_watcher_test.go b/internal/pkg/agent/application/upgrade/marker_watcher_test.go index 9d54a9bbcae..956f87df02a 100644 --- a/internal/pkg/agent/application/upgrade/marker_watcher_test.go +++ b/internal/pkg/agent/application/upgrade/marker_watcher_test.go @@ -83,6 +83,10 @@ func TestMarkerWatcher(t *testing.T) { func TestProcessMarker(t *testing.T) { cases := map[string]struct { markerFileContents string + upgradeStarted bool + + currentAgentVersion string + currentAgentHash string expectedErrLogMsg bool expectedDetails *details.Details @@ -91,11 +95,13 @@ func TestProcessMarker(t *testing.T) { markerFileContents: ` invalid `, + upgradeStarted: false, expectedErrLogMsg: true, expectedDetails: nil, }, "no_marker": { markerFileContents: "", + upgradeStarted: false, expectedErrLogMsg: false, expectedDetails: nil, }, @@ -103,6 +109,7 @@ invalid markerFileContents: ` prev_version: 8.9.2 `, + upgradeStarted: false, expectedDetails: &details.Details{ TargetVersion: "unknown", State: details.StateRollback, @@ -114,6 +121,7 @@ prev_version: 8.9.2 details: target_version: 8.9.2 `, + upgradeStarted: false, expectedErrLogMsg: false, expectedDetails: &details.Details{ TargetVersion: "8.9.2", @@ -127,6 +135,7 @@ details: target_version: 8.9.2 state: UPG_WATCHING `, + upgradeStarted: false, expectedErrLogMsg: false, expectedDetails: &details.Details{ TargetVersion: "8.9.2", @@ -140,12 +149,72 @@ details: target_version: 8.9.2 state: UPG_WATCHING `, + upgradeStarted: false, expectedErrLogMsg: false, expectedDetails: &details.Details{ TargetVersion: "8.9.2", State: details.StateWatching, }, }, + "same_version_different_hash": { + markerFileContents: ` 
+prev_version: 8.9.2 +prev_hash: aaaaaa +details: + target_version: 8.9.2 + state: UPG_WATCHING +`, + currentAgentVersion: "8.9.2", + currentAgentHash: "bbbbbb", + expectedErrLogMsg: false, + expectedDetails: &details.Details{ + TargetVersion: "8.9.2", + State: details.StateWatching, + }, + }, + "same_version_same_hash": { + markerFileContents: ` +prev_version: 8.9.2 +prev_hash: aaaaaa +details: + target_version: 8.9.2 + state: UPG_WATCHING +`, + currentAgentVersion: "8.9.2", + currentAgentHash: "aaaaaa", + expectedErrLogMsg: false, + expectedDetails: &details.Details{ + TargetVersion: "8.9.2", + State: details.StateRollback, + }, + }, + "same_version_same_hash_no_details": { + markerFileContents: ` +prev_version: 8.9.2 +prev_hash: aaaaaa +`, + currentAgentVersion: "8.9.2", + currentAgentHash: "aaaaaa", + expectedErrLogMsg: false, + expectedDetails: &details.Details{ + TargetVersion: "unknown", + State: details.StateRollback, + }, + }, + "upgrade_started": { + markerFileContents: ` +prev_version: 8.9.2 +details: + target_version: 8.9.2 + state: UPG_REPLACING +`, + upgradeStarted: true, + expectedErrLogMsg: false, + expectedDetails: &details.Details{ + TargetVersion: "8.9.2", + State: details.StateReplacing, + }, + }, } for name, test := range cases { @@ -182,7 +251,23 @@ details: } }() - mfw.processMarker("8.9.2") + // default values for version and hash + currentVersion := "8.9.2" + currentCommit := "" + + // apply overrides from testcase + if test.currentAgentVersion != "" { + currentVersion = test.currentAgentVersion + } + if test.currentAgentHash != "" { + currentCommit = test.currentAgentHash + } + + if test.upgradeStarted { + mfw.SetUpgradeStarted() + } + + mfw.processMarker(currentVersion, currentCommit) // error loading marker if test.expectedErrLogMsg { diff --git a/internal/pkg/agent/application/upgrade/step_download.go b/internal/pkg/agent/application/upgrade/step_download.go index 1623d722600..579ec656f55 100644 --- 
a/internal/pkg/agent/application/upgrade/step_download.go +++ b/internal/pkg/agent/application/upgrade/step_download.go @@ -121,7 +121,7 @@ func (u *Upgrader) downloadArtifact(ctx context.Context, version, sourceURI stri } } - if err := verifier.Verify(agentArtifact, parsedVersion.VersionWithPrerelease(), skipDefaultPgp, pgpBytes...); err != nil { + if err := verifier.Verify(agentArtifact, *parsedVersion, skipDefaultPgp, pgpBytes...); err != nil { return "", errors.New(err, "failed verification of agent binary") } return path, nil @@ -219,7 +219,7 @@ func (u *Upgrader) downloadOnce( // All download artifacts expect a name that includes .[-SNAPSHOT] so we have to // make sure not to include build metadata we might have in the parsed version (for snapshots we already // used that to configure the URL we download the files from) - path, err := downloader.Download(ctx, agentArtifact, version.VersionWithPrerelease()) + path, err := downloader.Download(ctx, agentArtifact, version) if err != nil { return "", fmt.Errorf("unable to download package: %w", err) } @@ -235,9 +235,12 @@ func (u *Upgrader) downloadWithRetries( settings *artifact.Config, upgradeDetails *details.Details, ) (string, error) { - cancelCtx, cancel := context.WithTimeout(ctx, settings.Timeout) + cancelDeadline := time.Now().Add(settings.Timeout) + cancelCtx, cancel := context.WithDeadline(ctx, cancelDeadline) defer cancel() + upgradeDetails.SetRetryUntil(&cancelDeadline) + expBo := backoff.NewExponentialBackOff() expBo.InitialInterval = settings.RetrySleepInitDuration boCtx := backoff.WithContext(expBo, cancelCtx) @@ -259,11 +262,16 @@ func (u *Upgrader) downloadWithRetries( opFailureNotificationFn := func(err error, retryAfter time.Duration) { u.log.Warnf("download attempt %d failed: %s; retrying in %s.", attempt, err.Error(), retryAfter) + upgradeDetails.SetRetryableError(err) } if err := backoff.RetryNotify(opFn, boCtx, opFailureNotificationFn); err != nil { return "", err } + // Clear retry details 
upon success + upgradeDetails.SetRetryableError(nil) + upgradeDetails.SetRetryUntil(nil) + return path, nil } diff --git a/internal/pkg/agent/application/upgrade/step_download_test.go b/internal/pkg/agent/application/upgrade/step_download_test.go index e9a772a3fc5..af485aaca77 100644 --- a/internal/pkg/agent/application/upgrade/step_download_test.go +++ b/internal/pkg/agent/application/upgrade/step_download_test.go @@ -28,7 +28,7 @@ type mockDownloader struct { downloadErr error } -func (md *mockDownloader) Download(ctx context.Context, agentArtifact artifact.Artifact, version string) (string, error) { +func (md *mockDownloader) Download(ctx context.Context, a artifact.Artifact, version *agtversion.ParsedSemVer) (string, error) { return md.downloadPath, md.downloadErr } @@ -94,7 +94,10 @@ func TestDownloadWithRetries(t *testing.T) { parsedVersion, err := agtversion.ParseVersion("8.9.0") require.NoError(t, err) - upgradeDetails := details.NewDetails(parsedVersion.String(), details.StateRequested, "") + + upgradeDetails, upgradeDetailsRetryUntil, upgradeDetailsRetryUntilWasUnset, upgradeDetailsRetryErrorMsg := mockUpgradeDetails(parsedVersion) + minRetryDeadline := time.Now().Add(settings.Timeout) + path, err := u.downloadWithRetries(context.Background(), mockDownloaderCtor, parsedVersion, &settings, upgradeDetails) require.NoError(t, err) require.Equal(t, expectedDownloadPath, path) @@ -102,6 +105,16 @@ func TestDownloadWithRetries(t *testing.T) { logs := obs.TakeAll() require.Len(t, logs, 1) require.Equal(t, "download attempt 1", logs[0].Message) + + // Check that upgradeDetails.Metadata.RetryUntil was set at some point + // during the retryable download and then check that it was unset upon + // successful download. + require.GreaterOrEqual(t, *upgradeDetailsRetryUntil, minRetryDeadline) + require.True(t, *upgradeDetailsRetryUntilWasUnset) + require.Nil(t, upgradeDetails.Metadata.RetryUntil) + + // Check that upgradeDetails.Metadata.RetryErrorMsg was never set. 
+ require.Empty(t, *upgradeDetailsRetryErrorMsg) }) // Downloader constructor failing on first attempt, but succeeding on second attempt (= first retry) @@ -131,7 +144,10 @@ func TestDownloadWithRetries(t *testing.T) { parsedVersion, err := agtversion.ParseVersion("8.9.0") require.NoError(t, err) - upgradeDetails := details.NewDetails(parsedVersion.String(), details.StateRequested, "") + + upgradeDetails, upgradeDetailsRetryUntil, upgradeDetailsRetryUntilWasUnset, upgradeDetailsRetryErrorMsg := mockUpgradeDetails(parsedVersion) + minRetryDeadline := time.Now().Add(settings.Timeout) + path, err := u.downloadWithRetries(context.Background(), mockDownloaderCtor, parsedVersion, &settings, upgradeDetails) require.NoError(t, err) require.Equal(t, expectedDownloadPath, path) @@ -141,6 +157,19 @@ func TestDownloadWithRetries(t *testing.T) { require.Equal(t, "download attempt 1", logs[0].Message) require.Contains(t, logs[1].Message, "unable to create fetcher: failed to construct downloader") require.Equal(t, "download attempt 2", logs[2].Message) + + // Check that upgradeDetails.Metadata.RetryUntil was set at some point + // during the retryable download and then check that it was unset upon + // successful download. + require.GreaterOrEqual(t, *upgradeDetailsRetryUntil, minRetryDeadline) + require.True(t, *upgradeDetailsRetryUntilWasUnset) + require.Nil(t, upgradeDetails.Metadata.RetryUntil) + + // Check that upgradeDetails.Metadata.RetryErrorMsg was set at some point + // during the retryable download and then check that it was unset upon + // successful download. 
+ require.NotEmpty(t, *upgradeDetailsRetryErrorMsg) + require.Empty(t, upgradeDetails.Metadata.RetryErrorMsg) }) // Download failing on first attempt, but succeeding on second attempt (= first retry) @@ -170,7 +199,10 @@ func TestDownloadWithRetries(t *testing.T) { parsedVersion, err := agtversion.ParseVersion("8.9.0") require.NoError(t, err) - upgradeDetails := details.NewDetails(parsedVersion.String(), details.StateRequested, "") + + upgradeDetails, upgradeDetailsRetryUntil, upgradeDetailsRetryUntilWasUnset, upgradeDetailsRetryErrorMsg := mockUpgradeDetails(parsedVersion) + minRetryDeadline := time.Now().Add(settings.Timeout) + path, err := u.downloadWithRetries(context.Background(), mockDownloaderCtor, parsedVersion, &settings, upgradeDetails) require.NoError(t, err) require.Equal(t, expectedDownloadPath, path) @@ -180,6 +212,19 @@ func TestDownloadWithRetries(t *testing.T) { require.Equal(t, "download attempt 1", logs[0].Message) require.Contains(t, logs[1].Message, "unable to download package: download failed; retrying") require.Equal(t, "download attempt 2", logs[2].Message) + + // Check that upgradeDetails.Metadata.RetryUntil was set at some point + // during the retryable download and then check that it was unset upon + // successful download. + require.GreaterOrEqual(t, *upgradeDetailsRetryUntil, minRetryDeadline) + require.True(t, *upgradeDetailsRetryUntilWasUnset) + require.Nil(t, upgradeDetails.Metadata.RetryUntil) + + // Check that upgradeDetails.Metadata.RetryErrorMsg was set at some point + // during the retryable download and then check that it was unset upon + // successful download. 
+ require.NotEmpty(t, *upgradeDetailsRetryErrorMsg) + require.Empty(t, upgradeDetails.Metadata.RetryErrorMsg) }) // Download timeout expired (before all retries are exhausted) @@ -197,7 +242,10 @@ func TestDownloadWithRetries(t *testing.T) { parsedVersion, err := agtversion.ParseVersion("8.9.0") require.NoError(t, err) - upgradeDetails := details.NewDetails(parsedVersion.String(), details.StateRequested, "") + + upgradeDetails, upgradeDetailsRetryUntil, upgradeDetailsRetryUntilWasUnset, upgradeDetailsRetryErrorMsg := mockUpgradeDetails(parsedVersion) + minRetryDeadline := time.Now().Add(testCaseSettings.Timeout) + path, err := u.downloadWithRetries(context.Background(), mockDownloaderCtor, parsedVersion, &testCaseSettings, upgradeDetails) require.Equal(t, "context deadline exceeded", err.Error()) require.Equal(t, "", path) @@ -209,5 +257,48 @@ func TestDownloadWithRetries(t *testing.T) { require.Equal(t, fmt.Sprintf("download attempt %d", i+1), logs[(2*i)].Message) require.Contains(t, logs[(2*i+1)].Message, "unable to download package: download failed; retrying") } + + // Check that upgradeDetails.Metadata.RetryUntil was set at some point + // during the retryable download and then check that it was never unset, + // since we didn't have a successful download. + require.GreaterOrEqual(t, *upgradeDetailsRetryUntil, minRetryDeadline) + require.False(t, *upgradeDetailsRetryUntilWasUnset) + require.Equal(t, *upgradeDetailsRetryUntil, *upgradeDetails.Metadata.RetryUntil) + + // Check that upgradeDetails.Metadata.RetryErrorMsg was set at some point + // during the retryable download and then check that it was never unset, + //since we didn't have a successful download. 
+ require.NotEmpty(t, *upgradeDetailsRetryErrorMsg) + require.Equal(t, *upgradeDetailsRetryErrorMsg, upgradeDetails.Metadata.RetryErrorMsg) }) } + +// mockUpgradeDetails returns a *details.Details value that has an observer registered on it for inspecting +// certain properties of the object being set and unset. It also returns: +// - a *time.Time value, which will be not nil if Metadata.RetryUntil is set on the mock value, +// - a *bool value, which will be true if Metadata.RetryUntil is set and then unset on the mock value, +// - a *string value, which will be non-empty if Metadata.RetryErrorMsg is set on the mock value. +func mockUpgradeDetails(parsedVersion *agtversion.ParsedSemVer) (*details.Details, *time.Time, *bool, *string) { + var upgradeDetailsRetryUntil time.Time + var upgradeDetailsRetryUntilWasUnset bool + var upgradeDetailsRetryErrorMsg string + + upgradeDetails := details.NewDetails(parsedVersion.String(), details.StateRequested, "") + upgradeDetails.RegisterObserver(func(details *details.Details) { + if details.Metadata.RetryUntil != nil { + upgradeDetailsRetryUntil = *details.Metadata.RetryUntil + } + + if !upgradeDetailsRetryUntil.IsZero() && details.Metadata.RetryUntil == nil { + upgradeDetailsRetryUntilWasUnset = true + } + + if details.Metadata.RetryErrorMsg != "" { + upgradeDetailsRetryErrorMsg = details.Metadata.RetryErrorMsg + } + }) + + return upgradeDetails, + &upgradeDetailsRetryUntil, &upgradeDetailsRetryUntilWasUnset, + &upgradeDetailsRetryErrorMsg +} diff --git a/internal/pkg/agent/application/upgrade/step_mark.go b/internal/pkg/agent/application/upgrade/step_mark.go index b5743582317..bca67d307f0 100644 --- a/internal/pkg/agent/application/upgrade/step_mark.go +++ b/internal/pkg/agent/application/upgrade/step_mark.go @@ -194,7 +194,10 @@ func loadMarker(markerFile string) (*UpdateMarker, error) { }, nil } -func SaveMarker(marker *UpdateMarker) error { +// SaveMarker serializes and persists the given upgrade marker to disk. 
+// For critical upgrade transitions, pass shouldFsync as true so the marker +// file is immediately flushed to persistent storage. +func SaveMarker(marker *UpdateMarker, shouldFsync bool) error { makerSerializer := &updateMarkerSerializer{ Hash: marker.Hash, UpdatedOn: marker.UpdatedOn, @@ -209,7 +212,7 @@ func SaveMarker(marker *UpdateMarker) error { return err } - return writeMarkerFile(markerFilePath(), markerBytes) + return writeMarkerFile(markerFilePath(), markerBytes, shouldFsync) } func markerFilePath() string { diff --git a/internal/pkg/agent/application/upgrade/upgrade.go b/internal/pkg/agent/application/upgrade/upgrade.go index 4e9eb72bacd..b299dd3fd75 100644 --- a/internal/pkg/agent/application/upgrade/upgrade.go +++ b/internal/pkg/agent/application/upgrade/upgrade.go @@ -134,6 +134,16 @@ func (u *Upgrader) Upgradeable() bool { // Upgrade upgrades running agent, function returns shutdown callback that must be called by reexec. func (u *Upgrader) Upgrade(ctx context.Context, version string, sourceURI string, action *fleetapi.ActionUpgrade, det *details.Details, skipVerifyOverride bool, skipDefaultPgp bool, pgpBytes ...string) (_ reexec.ShutdownCallbackFn, err error) { u.log.Infow("Upgrading agent", "version", version, "source_uri", sourceURI) + + // Inform the Upgrade Marker Watcher that we've started upgrading. Note that this + // is only possible to do in-memory since, today, the process that's initiating + // the upgrade is the same as the Agent process in which the Upgrade Marker Watcher is + // running. If/when, in the future, the process initiating the upgrade is separated + // from the Agent process in which the Upgrade Marker Watcher is running, such in-memory + // communication will need to be replaced with inter-process communication (e.g. via + // a file, e.g. the Upgrade Marker file or something else). 
+ u.markerWatcher.SetUpgradeStarted() + span, ctx := apm.StartSpan(ctx, "upgrade", "app.internal") defer span.End() @@ -243,7 +253,7 @@ func (u *Upgrader) Ack(ctx context.Context, acker acker.Acker) error { marker.Acked = true - return SaveMarker(marker) + return SaveMarker(marker, false) } func (u *Upgrader) MarkerWatcher() MarkerWatcher { diff --git a/internal/pkg/agent/cmd/status.go b/internal/pkg/agent/cmd/status.go index 1bc515b376b..b61c6104785 100644 --- a/internal/pkg/agent/cmd/status.go +++ b/internal/pkg/agent/cmd/status.go @@ -14,6 +14,7 @@ import ( "time" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/details" + "github.com/elastic/elastic-agent/pkg/control" "github.com/elastic/elastic-agent/pkg/control/v2/client" "github.com/elastic/elastic-agent/pkg/control/v2/cproto" @@ -168,8 +169,8 @@ func listUpgradeDetails(l list.Writer, upgradeDetails *cproto.UpgradeDetails) { if upgradeDetails.Metadata != nil { l.AppendItem("metadata") l.Indent() - if upgradeDetails.Metadata.ScheduledAt != nil && !upgradeDetails.Metadata.ScheduledAt.AsTime().IsZero() { - l.AppendItem("scheduled_at: " + upgradeDetails.Metadata.ScheduledAt.AsTime().UTC().Format(time.RFC3339)) + if upgradeDetails.Metadata.ScheduledAt != "" { + l.AppendItem("scheduled_at: " + upgradeDetails.Metadata.ScheduledAt) } if upgradeDetails.Metadata.FailedState != "" { l.AppendItem("failed_state: " + upgradeDetails.Metadata.FailedState) @@ -180,6 +181,12 @@ func listUpgradeDetails(l list.Writer, upgradeDetails *cproto.UpgradeDetails) { if upgradeDetails.State == string(details.StateDownloading) { l.AppendItem(fmt.Sprintf("download_percent: %.2f%%", upgradeDetails.Metadata.DownloadPercent*100)) } + if upgradeDetails.Metadata.RetryUntil != "" { + l.AppendItem("retry_until: " + humanDurationUntil(upgradeDetails.Metadata.RetryUntil, time.Now())) + } + if upgradeDetails.Metadata.RetryErrorMsg != "" { + l.AppendItem("retry_error_msg: " + upgradeDetails.Metadata.RetryErrorMsg) + } 
l.UnIndent() } @@ -236,3 +243,13 @@ func yamlOutput(w io.Writer, out interface{}) error { fmt.Fprintf(w, "%s\n", bytes) return nil } + +func humanDurationUntil(targetTime string, from time.Time) string { + target, err := time.Parse(control.TimeFormat(), targetTime) + if err != nil { + return targetTime + } + + until := target.Sub(from) + return until.String() +} diff --git a/internal/pkg/agent/cmd/status_test.go b/internal/pkg/agent/cmd/status_test.go index f405f217215..9d44029bf64 100644 --- a/internal/pkg/agent/cmd/status_test.go +++ b/internal/pkg/agent/cmd/status_test.go @@ -9,10 +9,11 @@ import ( "fmt" "os" "path/filepath" + "regexp" "testing" "time" - "google.golang.org/protobuf/types/known/timestamppb" + "github.com/elastic/elastic-agent/pkg/control" "github.com/jedib0t/go-pretty/v6/list" @@ -208,7 +209,7 @@ func TestListUpgradeDetails(t *testing.T) { TargetVersion: "8.12.0", State: "UPG_DOWNLOADING", Metadata: &cproto.UpgradeDetailsMetadata{ - ScheduledAt: timestamppb.New(now), + ScheduledAt: now.Format(control.TimeFormat()), DownloadPercent: 0.17679, }, }, @@ -217,8 +218,29 @@ func TestListUpgradeDetails(t *testing.T) { ├─ state: UPG_DOWNLOADING └─ metadata ├─ scheduled_at: %s - └─ download_percent: 17.68%%`, now.Format(time.RFC3339)), - }} + └─ download_percent: 17.68%%`, now.Format(control.TimeFormat())), + }, + "retrying_downloading": { + upgradeDetails: &cproto.UpgradeDetails{ + TargetVersion: "8.12.0", + State: "UPG_DOWNLOADING", + Metadata: &cproto.UpgradeDetailsMetadata{ + ScheduledAt: now.Format(control.TimeFormat()), + DownloadPercent: 0, + RetryErrorMsg: "unable to download, will retry", + RetryUntil: "1h59m32s", + }, + }, + expectedOutput: fmt.Sprintf(`── upgrade_details + ├─ target_version: 8.12.0 + ├─ state: UPG_DOWNLOADING + └─ metadata + ├─ scheduled_at: %s + ├─ download_percent: 0.00%% + ├─ retry_until: 1h59m32s + └─ retry_error_msg: unable to download, will retry`, now.Format(control.TimeFormat())), + }, + } for name, test := range cases { 
t.Run(name, func(t *testing.T) { @@ -231,3 +253,30 @@ func TestListUpgradeDetails(t *testing.T) { }) } } + +func TestHumanDurationUntil(t *testing.T) { + now := time.Now() + cases := map[string]struct { + targetTimeStr string + + // For some reason the calculated duration is never precise + // so we use a regexp instead. + expectedDurationRegexp string + }{ + "valid_time": { + targetTimeStr: now.Add(3 * time.Hour).Format(control.TimeFormat()), + expectedDurationRegexp: `^2h59m59\.\d+s$`, + }, + "invalid_time": { + targetTimeStr: "foobar", + expectedDurationRegexp: "^foobar$", + }, + } + + for name, test := range cases { + t.Run(name, func(t *testing.T) { + actualTimeStr := humanDurationUntil(test.targetTimeStr, now) + require.Regexp(t, regexp.MustCompile(test.expectedDurationRegexp), actualTimeStr) + }) + } +} diff --git a/internal/pkg/agent/cmd/watch.go b/internal/pkg/agent/cmd/watch.go index b3d5727175c..a895e10df5a 100644 --- a/internal/pkg/agent/cmd/watch.go +++ b/internal/pkg/agent/cmd/watch.go @@ -126,7 +126,7 @@ func watchCmd(log *logp.Logger, cfg *configuration.Configuration) error { } marker.Details.SetState(details.StateRollback) - err = upgrade.SaveMarker(marker) + err = upgrade.SaveMarker(marker, true) if err != nil { log.Errorf("unable to save upgrade marker before attempting to rollback: %s", err.Error()) } @@ -136,7 +136,7 @@ func watchCmd(log *logp.Logger, cfg *configuration.Configuration) error { log.Error("rollback failed", err) marker.Details.Fail(err) - err = upgrade.SaveMarker(marker) + err = upgrade.SaveMarker(marker, true) if err != nil { log.Errorf("unable to save upgrade marker after rollback failed: %s", err.Error()) } @@ -146,7 +146,7 @@ func watchCmd(log *logp.Logger, cfg *configuration.Configuration) error { // watch succeeded - upgrade was successful! 
marker.Details.SetState(details.StateCompleted) - err = upgrade.SaveMarker(marker) + err = upgrade.SaveMarker(marker, false) if err != nil { log.Errorf("unable to save upgrade marker after successful watch: %s", err.Error()) } diff --git a/internal/pkg/composable/providers/kubernetessecrets/config.go b/internal/pkg/composable/providers/kubernetessecrets/config.go index 95ff308c3aa..0f021a3aaae 100644 --- a/internal/pkg/composable/providers/kubernetessecrets/config.go +++ b/internal/pkg/composable/providers/kubernetessecrets/config.go @@ -4,10 +4,26 @@ package kubernetessecrets -import "github.com/elastic/elastic-agent-autodiscover/kubernetes" +import ( + "time" + + "github.com/elastic/elastic-agent-autodiscover/kubernetes" +) // Config for kubernetes provider type Config struct { KubeConfig string `config:"kube_config"` KubeClientOptions kubernetes.KubeClientOptions `config:"kube_client_options"` + + RefreshInterval time.Duration `config:"cache_refresh_interval"` + TTLDelete time.Duration `config:"cache_ttl"` + RequestTimeout time.Duration `config:"cache_request_timeout"` + DisableCache bool `config:"cache_disable"` +} + +func (c *Config) InitDefaults() { + c.RefreshInterval = 60 * time.Second + c.TTLDelete = 1 * time.Hour + c.RequestTimeout = 5 * time.Second + c.DisableCache = false } diff --git a/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets.go b/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets.go index 1537a232dd1..4bcf90470b3 100644 --- a/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets.go +++ b/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets.go @@ -8,6 +8,7 @@ import ( "context" "strings" "sync" + "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8sclient "k8s.io/client-go/kubernetes" @@ -33,6 +34,14 @@ type contextProviderK8sSecrets struct { clientMx sync.Mutex client k8sclient.Interface + + secretsCacheMx sync.RWMutex + secretsCache map[string]*secretsData +} + 
+type secretsData struct { + value string + lastAccess time.Time } // ContextProviderBuilder builds the context provider. @@ -46,22 +55,154 @@ func ContextProviderBuilder(logger *logger.Logger, c *config.Config, managed boo return nil, errors.New(err, "failed to unpack configuration") } return &contextProviderK8sSecrets{ - logger: logger, - config: &cfg, + logger: logger, + config: &cfg, + secretsCache: make(map[string]*secretsData), }, nil } func (p *contextProviderK8sSecrets) Fetch(key string) (string, bool) { - // key = "kubernetes_secrets.somenamespace.somesecret.value" + if p.config.DisableCache { + valid := p.validateKey(key) + if valid { + return p.fetchSecretWithTimeout(key) + } else { + return "", false + } + } else { + return p.getFromCache(key) + } +} + +// Run initializes the k8s secrets context provider. +func (p *contextProviderK8sSecrets) Run(ctx context.Context, comm corecomp.ContextProviderComm) error { + client, err := getK8sClientFunc(p.config.KubeConfig, p.config.KubeClientOptions) + if err != nil { + p.logger.Debugf("kubernetes_secrets provider skipped, unable to connect: %s", err) + return nil + } p.clientMx.Lock() - client := p.client + p.client = client p.clientMx.Unlock() - if client == nil { - return "", false + + if !p.config.DisableCache { + go p.updateSecrets(ctx) } + + <-comm.Done() + + p.clientMx.Lock() + p.client = nil + p.clientMx.Unlock() + return comm.Err() +} + +func getK8sClient(kubeconfig string, opt kubernetes.KubeClientOptions) (k8sclient.Interface, error) { + return kubernetes.GetKubernetesClient(kubeconfig, opt) +} + +// Update the secrets in the cache every RefreshInterval +func (p *contextProviderK8sSecrets) updateSecrets(ctx context.Context) { + timer := time.NewTimer(p.config.RefreshInterval) + for { + select { + case <-ctx.Done(): + return + case <-timer.C: + p.updateCache() + timer.Reset(p.config.RefreshInterval) + } + } +} + +// mergeWithCurrent merges the updated map with the cache map. 
+// This function needs to be called between the mutex lock for the map. +func (p *contextProviderK8sSecrets) mergeWithCurrent(updatedMap map[string]*secretsData) map[string]*secretsData { + merged := make(map[string]*secretsData) + + for name, data := range p.secretsCache { + diff := time.Since(data.lastAccess) + if diff < p.config.TTLDelete { + merged[name] = data + } + } + + for name, data := range updatedMap { + // We need to check if the key is already in the new map. If it is, lastAccess cannot be overwritten since + // it could have been updated when trying to fetch the secret at the same time we are running update cache. + // In that case, we only update the value. + if _, ok := merged[name]; ok { + merged[name].value = data.value + } + } + + return merged +} + +func (p *contextProviderK8sSecrets) updateCache() { + // deleting entries does not free the memory, so we need to create a new map + // to place the secrets we want to keep + cacheTmp := make(map[string]*secretsData) + + // to not hold the lock for long, we copy the current state of the cache map + copyMap := make(map[string]secretsData) + p.secretsCacheMx.RLock() + for name, data := range p.secretsCache { + copyMap[name] = *data + } + p.secretsCacheMx.RUnlock() + + for name, data := range copyMap { + diff := time.Since(data.lastAccess) + if diff < p.config.TTLDelete { + value, ok := p.fetchSecretWithTimeout(name) + if ok { + newData := &secretsData{ + value: value, + lastAccess: data.lastAccess, + } + cacheTmp[name] = newData + } + + } + } + + // While the cache was updated, it is possible that some secret was added through another go routine. + // We need to merge the updated map with the current cache map to catch the new entries and avoid + // loss of data. 
+ p.secretsCacheMx.Lock() + p.secretsCache = p.mergeWithCurrent(cacheTmp) + p.secretsCacheMx.Unlock() +} + +func (p *contextProviderK8sSecrets) getFromCache(key string) (string, bool) { + p.secretsCacheMx.RLock() + _, ok := p.secretsCache[key] + p.secretsCacheMx.RUnlock() + + // if value is still not present in cache, it is possible we haven't tried to fetch it yet + if !ok { + value, ok := p.addToCache(key) + // if it was not possible to fetch the secret, return + if !ok { + return value, ok + } + } + + p.secretsCacheMx.Lock() + data, ok := p.secretsCache[key] + data.lastAccess = time.Now() + pass := data.value + p.secretsCacheMx.Unlock() + + return pass, ok +} + +func (p *contextProviderK8sSecrets) validateKey(key string) bool { + // Make sure the key has the expected format "kubernetes_secrets.somenamespace.somesecret.value" tokens := strings.Split(key, ".") if len(tokens) > 0 && tokens[0] != "kubernetes_secrets" { - return "", false + return false } if len(tokens) != 4 { p.logger.Debugf( @@ -69,44 +210,80 @@ func (p *contextProviderK8sSecrets) Fetch(key string) (string, bool) { key, "kubernetes_secrets.somenamespace.somesecret.value", ) + return false + } + return true +} + +func (p *contextProviderK8sSecrets) addToCache(key string) (string, bool) { + valid := p.validateKey(key) + if !valid { return "", false } + + value, ok := p.fetchSecretWithTimeout(key) + if ok { + p.secretsCacheMx.Lock() + p.secretsCache[key] = &secretsData{value: value} + p.secretsCacheMx.Unlock() + } + return value, ok +} + +type Result struct { + value string + ok bool +} + +func (p *contextProviderK8sSecrets) fetchSecretWithTimeout(key string) (string, bool) { + ctxTimeout, cancel := context.WithTimeout(context.Background(), p.config.RequestTimeout) + defer cancel() + + resultCh := make(chan Result, 1) + p.fetchSecret(ctxTimeout, key, resultCh) + + select { + case <-ctxTimeout.Done(): + p.logger.Errorf("Could not retrieve value for key %v: %v", key, ctxTimeout.Err()) + return "", false 
+ case result := <-resultCh: + return result.value, result.ok + } +} + +func (p *contextProviderK8sSecrets) fetchSecret(context context.Context, key string, resultCh chan Result) { + p.clientMx.Lock() + client := p.client + p.clientMx.Unlock() + if client == nil { + resultCh <- Result{value: "", ok: false} + return + } + + tokens := strings.Split(key, ".") + // key has the format "kubernetes_secrets.somenamespace.somesecret.value" + // This function is only called from: + // - addToCache, where we already validated that the key has the right format. + // - updateCache, where the results are only added to the cache through addToCache + // Because of this we no longer need to validate the key ns := tokens[1] secretName := tokens[2] secretVar := tokens[3] - secretIntefrace := client.CoreV1().Secrets(ns) - ctx := context.TODO() - secret, err := secretIntefrace.Get(ctx, secretName, metav1.GetOptions{}) + secretInterface := client.CoreV1().Secrets(ns) + secret, err := secretInterface.Get(context, secretName, metav1.GetOptions{}) + if err != nil { p.logger.Errorf("Could not retrieve secret from k8s API: %v", err) - return "", false + resultCh <- Result{value: "", ok: false} + return } if _, ok := secret.Data[secretVar]; !ok { p.logger.Errorf("Could not retrieve value %v for secret %v", secretVar, secretName) - return "", false + resultCh <- Result{value: "", ok: false} + return } - secretString := secret.Data[secretVar] - return string(secretString), true -} -// Run initializes the k8s secrets context provider. 
-func (p *contextProviderK8sSecrets) Run(ctx context.Context, comm corecomp.ContextProviderComm) error { - client, err := getK8sClientFunc(p.config.KubeConfig, p.config.KubeClientOptions) - if err != nil { - p.logger.Debugf("Kubernetes_secrets provider skipped, unable to connect: %s", err) - return nil - } - p.clientMx.Lock() - p.client = client - p.clientMx.Unlock() - <-comm.Done() - p.clientMx.Lock() - p.client = nil - p.clientMx.Unlock() - return comm.Err() -} - -func getK8sClient(kubeconfig string, opt kubernetes.KubeClientOptions) (k8sclient.Interface, error) { - return kubernetes.GetKubernetesClient(kubeconfig, opt) + secretString := secret.Data[secretVar] + resultCh <- Result{value: string(secretString), ok: true} } diff --git a/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets_test.go b/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets_test.go index 9924c84e6bc..78d632d6437 100644 --- a/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets_test.go +++ b/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets_test.go @@ -29,7 +29,7 @@ const ( pass = "testing_passpass" ) -func Test_K8sSecretsProvider_Fetch(t *testing.T) { +func Test_K8sSecretsProvider_FetchWrongSecret(t *testing.T) { client := k8sfake.NewSimpleClientset() secret := &v1.Secret{ TypeMeta: metav1.TypeMeta{ @@ -79,13 +79,131 @@ func Test_K8sSecretsProvider_Fetch(t *testing.T) { <-time.After(10 * time.Millisecond) } - val, found := fp.Fetch("kubernetes_secrets.test_namespace.testing_secret.secret_value") + val, found := fp.Fetch("kubernetes_secrets.test_namespace.testing_secretHACK.secret_value") + assert.False(t, found) + assert.EqualValues(t, val, "") +} + +func Test_K8sSecretsProvider_Fetch_Cache_Enabled(t *testing.T) { + client := k8sfake.NewSimpleClientset() + + ttlDelete, err := time.ParseDuration("1s") + require.NoError(t, err) + + refreshInterval, err := time.ParseDuration("100ms") + require.NoError(t, err) + + 
secret := &v1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "apps/v1beta1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "testing_secret", + Namespace: ns, + }, + Data: map[string][]byte{ + "secret_value": []byte(pass), + }, + } + _, err = client.CoreV1().Secrets(ns).Create(context.Background(), secret, metav1.CreateOptions{}) + require.NoError(t, err) + + logger := logp.NewLogger("test_k8s_secrets") + + c := map[string]interface{}{ + "cache_refresh_interval": refreshInterval, + "cache_ttl": ttlDelete, + "cache_disable": false, + } + cfg, err := config.NewConfigFrom(c) + require.NoError(t, err) + + p, err := ContextProviderBuilder(logger, cfg, true) + require.NoError(t, err) + + fp, _ := p.(*contextProviderK8sSecrets) + + getK8sClientFunc = func(kubeconfig string, opt kubernetes.KubeClientOptions) (k8sclient.Interface, error) { + return client, nil + } + require.NoError(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + comm := ctesting.NewContextComm(ctx) + + go func() { + _ = fp.Run(ctx, comm) + }() + + for { + fp.clientMx.Lock() + client := fp.client + fp.clientMx.Unlock() + if client != nil { + break + } + <-time.After(10 * time.Millisecond) + } + + // Secret cache should be empty at start + fp.secretsCacheMx.Lock() + assert.Equal(t, 0, len(fp.secretsCache)) + fp.secretsCacheMx.Unlock() + + key := "kubernetes_secrets.test_namespace.testing_secret.secret_value" + + // Secret should be in the cache after this call + val, found := fp.Fetch(key) assert.True(t, found) assert.Equal(t, val, pass) + fp.secretsCacheMx.RLock() + assert.Equal(t, len(fp.secretsCache), 1) + assert.NotNil(t, fp.secretsCache[key]) + assert.NotZero(t, fp.secretsCache[key].lastAccess) + fp.secretsCacheMx.RUnlock() + + // Update the secret and check after TTL time, the secret value is correct + newPass := "new-pass" + secret = &v1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "apps/v1beta1", + }, + ObjectMeta: 
metav1.ObjectMeta{ + Name: "testing_secret", + Namespace: ns, + }, + Data: map[string][]byte{ + "secret_value": []byte(newPass), + }, + } + _, err = client.CoreV1().Secrets(ns).Update(context.Background(), secret, metav1.UpdateOptions{}) + require.NoError(t, err) + + // wait for ttl update + <-time.After(refreshInterval) + assert.Eventuallyf(t, func() bool { + val, found = fp.Fetch(key) + return found && val == newPass + }, refreshInterval*3, refreshInterval, "Failed to update the secret value after TTL update has passed.") + + // After TTL delete, secret should no longer be found in cache since it was never + // fetched during that time + <-time.After(ttlDelete) + assert.Eventuallyf(t, func() bool { + fp.secretsCacheMx.RLock() + size := len(fp.secretsCache) + fp.secretsCacheMx.RUnlock() + return size == 0 + }, ttlDelete*3, ttlDelete, "Failed to delete the secret after TTL delete has passed.") + } -func Test_K8sSecretsProvider_FetchWrongSecret(t *testing.T) { +func Test_K8sSecretsProvider_Fetch_Cache_Disabled(t *testing.T) { client := k8sfake.NewSimpleClientset() + secret := &v1.Secret{ TypeMeta: metav1.TypeMeta{ Kind: "Secret", @@ -103,7 +221,11 @@ func Test_K8sSecretsProvider_FetchWrongSecret(t *testing.T) { require.NoError(t, err) logger := logp.NewLogger("test_k8s_secrets") - cfg, err := config.NewConfigFrom(map[string]string{"a": "b"}) + + c := map[string]interface{}{ + "cache_disable": true, + } + cfg, err := config.NewConfigFrom(c) require.NoError(t, err) p, err := ContextProviderBuilder(logger, cfg, true) @@ -134,7 +256,37 @@ func Test_K8sSecretsProvider_FetchWrongSecret(t *testing.T) { <-time.After(10 * time.Millisecond) } - val, found := fp.Fetch("kubernetes_secrets.test_namespace.testing_secretHACK.secret_value") + key := "kubernetes_secrets.test_namespace.testing_secret.secret_value" + + // Secret should be in the cache after this call + val, found := fp.Fetch(key) + assert.True(t, found) + assert.Equal(t, val, pass) + + // Update the secret and check 
the result + newPass := "new-pass" + secret = &v1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "apps/v1beta1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "testing_secret", + Namespace: ns, + }, + Data: map[string][]byte{ + "secret_value": []byte(newPass), + }, + } + _, err = client.CoreV1().Secrets(ns).Update(context.Background(), secret, metav1.UpdateOptions{}) + require.NoError(t, err) + + val, found = fp.Fetch(key) + assert.True(t, found) + assert.Equal(t, val, newPass) + + // Check key that does not exist + val, found = fp.Fetch(key + "doesnotexist") assert.False(t, found) - assert.EqualValues(t, val, "") + assert.Equal(t, "", val) } diff --git a/magefile.go b/magefile.go index fe320754b91..bd00b199040 100644 --- a/magefile.go +++ b/magefile.go @@ -1646,7 +1646,7 @@ func (Integration) TestOnRemote(ctx context.Context) error { extraFlags = append(extraFlags, goTestFlags...) } extraFlags = append(extraFlags, "-test.shuffle", "on", - "-test.timeout", "0", "-test.run", "^("+strings.Join(packageTests, "|")+")$") + "-test.timeout", "2h", "-test.run", "^("+strings.Join(packageTests, "|")+")$") params := mage.GoTestArgs{ LogName: testName, OutputFile: fileName + ".out", @@ -1844,6 +1844,7 @@ func createTestRunner(matrix bool, singleTest string, goTestFlags string, batche DiagnosticsDir: diagDir, StateDir: ".integration-cache", Platforms: testPlatforms(), + Groups: testGroups(), Matrix: matrix, SingleTest: singleTest, VerboseMode: mg.Verbose(), @@ -1936,6 +1937,20 @@ func testPlatforms() []string { return platforms } +func testGroups() []string { + groupsStr := os.Getenv("TEST_GROUPS") + if groupsStr == "" { + return nil + } + var groups []string + for _, g := range strings.Split(groupsStr, " ") { + if g != "" { + groups = append(groups, g) + } + } + return groups +} + // Pre-requisite: user must have the gcloud CLI installed func authGCP(ctx context.Context) error { // We only need the service account token to exist. 
diff --git a/pkg/component/component.go b/pkg/component/component.go index 18060b645fd..d9100a6dd98 100644 --- a/pkg/component/component.go +++ b/pkg/component/component.go @@ -156,7 +156,11 @@ type Component struct { // Err used when there is an error with running this input. Used by the runtime to alert // the reason that all of these units are failed. - Err error `yaml:"error,omitempty"` + Err error `yaml:"-"` + // the YAML marshaller won't handle `error` values, since they don't implement MarshalYAML() + // the Component's own MarshalYAML method needs to handle this, and place any error values here instead of `Err`, + // so they can properly be rendered as a string + ErrMsg string `yaml:"error,omitempty"` // InputSpec on how the input should run. (not set when ShipperSpec set) InputSpec *InputRuntimeSpec `yaml:"input_spec,omitempty"` @@ -187,6 +191,13 @@ type Component struct { ShipperRef *ShipperReference `yaml:"shipper,omitempty"` } +func (c Component) MarshalYAML() (interface{}, error) { + if c.Err != nil { + c.ErrMsg = c.Err.Error() + } + return c, nil +} + // Type returns the type of the component. 
func (c *Component) Type() string { if c.InputSpec != nil { diff --git a/pkg/component/component_test.go b/pkg/component/component_test.go index 00c4d1c63cb..8d42ab64427 100644 --- a/pkg/component/component_test.go +++ b/pkg/component/component_test.go @@ -36,6 +36,31 @@ import ( "github.com/stretchr/testify/require" ) +// fake error type used for the test below +type testErr struct { + data string +} + +func (t testErr) Error() string { + return t.data +} + +func TestComponentMarshalError(t *testing.T) { + testComponent := Component{ + ID: "test-device", + Err: testErr{data: "test error value"}, + } + componentConfigs := []Component{testComponent} + + outData, err := yaml.Marshal(struct { + Components []Component `yaml:"components"` + }{ + Components: componentConfigs, + }) + require.NoError(t, err) + require.Contains(t, string(outData), "test error value") +} + func TestToComponents(t *testing.T) { linuxAMD64Platform := PlatformDetail{ Platform: Platform{ diff --git a/pkg/component/runtime/manager_test.go b/pkg/component/runtime/manager_test.go index 51bb941bca6..612a85b3eb0 100644 --- a/pkg/component/runtime/manager_test.go +++ b/pkg/component/runtime/manager_test.go @@ -495,7 +495,8 @@ func TestManager_FakeInput_Features(t *testing.T) { func TestManager_FakeInput_APM(t *testing.T) { testPaths(t) - ctx, cancel := context.WithCancel(context.Background()) + timeout := 30 * time.Second + ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() agentInfo, _ := info.NewAgentInfo(ctx, true) @@ -550,8 +551,6 @@ func TestManager_FakeInput_APM(t *testing.T) { subscriptionCtx, subCancel := context.WithCancel(context.Background()) defer subCancel() - subscriptionErrCh := make(chan error) - doneCh := make(chan struct{}) initialAPMConfig := &proto.APMConfig{ Elastic: &proto.ElasticAPM{ @@ -581,245 +580,152 @@ func TestManager_FakeInput_APM(t *testing.T) { }, } - go func() { - sub := m.Subscribe(subscriptionCtx, compID) - var healthIteration int - var 
retrievedApmConfig *proto.APMConfig - for { - select { - case <-subscriptionCtx.Done(): - return - case componentState := <-sub.Ch(): - t.Logf("component state changed: %+v", componentState) + sub := m.Subscribe(subscriptionCtx, compID) - if componentState.State == client.UnitStateFailed { - subscriptionErrCh <- fmt.Errorf("component failed: %s", componentState.Message) - return - } - - unit, ok := componentState.Units[ComponentUnitKey{UnitType: client.UnitTypeInput, UnitID: "fake-input"}] - if !ok { - subscriptionErrCh <- errors.New("unit missing: fake-input") - return - } - - switch unit.State { - case client.UnitStateFailed: - subscriptionErrCh <- fmt.Errorf("unit failed: %s", unit.Message) - - case client.UnitStateHealthy: - healthIteration++ - t.Logf("Healthy iteration %d starting at %s", healthIteration, time.Now()) - switch healthIteration { - case 1: // yes, it's starting on 1 - comp.Component = &proto.Component{ - ApmConfig: initialAPMConfig, - } - m.Update(component.Model{Components: []component.Component{comp}}) - err := <-m.errCh - if err != nil { - subscriptionErrCh <- fmt.Errorf("[case %d]: failed to update component: %w", - healthIteration, err) - return - } + m.Update(component.Model{Components: []component.Component{comp}}) + err = <-m.errCh + require.NoError(t, err, "manager Update call must succeed") + + // testStep tracks how far into the test sequence we've progressed. 
+ // 0: When unit is healthy, set initialAPMConfig + // 1: When initialAPMConfig is active, set modifiedAPMConfig + // 2: When modifiedAPMConfig is active, clear all APMConfig + // 3: When APM config is empty again, succeed + var testStep int +STATELOOP: + for { + select { + case <-ctx.Done(): + require.Fail(t, "timed out waiting for state update") + case componentState := <-sub.Ch(): + t.Logf("component state changed: %+v", componentState) - // check if config sent on iteration 1 was set - case 2: - // In the previous iteration, the (fake) component has received a CheckinExpected - // message to propagate the APM configuration. In this iteration we are about to - // retrieve the APM configuration from the same component via the retrieve_apm_config - // action. Within the component, which is running as a separate process, actions - // and CheckinExpected messages are processed concurrently. We need some way to wait - // a reasonably short amount of time for the CheckinExpected message to be applied by the - // component (thus setting the APM config) before we query the same component - // for apm config information. We accomplish this via assert.Eventually. - // We also send a modified APM config to see that the component updates correctly and - // reports the new config in the next iteration. 
- assert.Eventuallyf(t, func() bool { - // check the component - res, err := m.PerformAction( - context.Background(), - comp, - comp.Units[0], - fakecmp.ActionRetrieveAPMConfig, - nil) - if err != nil { - subscriptionErrCh <- fmt.Errorf("[case %d]: failed to PerformAction %s: %w", - healthIteration, fakecmp.ActionRetrieveAPMConfig, err) - return false - } - retrievedApmConfig, err = extractAPMConfigFromActionResult(t, res) - if err != nil { - subscriptionErrCh <- fmt.Errorf("[case %d]: failed to retrieve APM Config from ActionResult %s: %w", - healthIteration, fakecmp.ActionRetrieveAPMConfig, err) - return false - } - return gproto.Equal(initialAPMConfig, retrievedApmConfig) - }, 1*time.Second, 100*time.Millisecond, "APM config was not received by component. expected: %s actual: %s", initialAPMConfig, retrievedApmConfig) + require.NotEqual(t, client.UnitStateFailed, componentState.State, "component failed: %v", componentState.Message) - comp.Component = &proto.Component{ - ApmConfig: modifiedAPMConfig, - } - m.Update(component.Model{Components: []component.Component{comp}}) - err := <-m.errCh - if err != nil { - subscriptionErrCh <- fmt.Errorf("[case %d]: failed to update component: %w", - healthIteration, err) - return - } - // Set a new APM config to check that we update correctly - case 3: - // In the previous iteration, the (fake) component has received another CheckinExpected - // message to propagate a modified APM configuration. In this iteration we are about to - // retrieve the APM configuration from the same component via the retrieve_apm_config - // action. 
- assert.Eventuallyf(t, func() bool { - // check the component - res, err := m.PerformAction( - context.Background(), - comp, - comp.Units[0], - fakecmp.ActionRetrieveAPMConfig, - nil) - if err != nil { - subscriptionErrCh <- fmt.Errorf("[case %d]: failed to PerformAction %s: %w", - healthIteration, fakecmp.ActionRetrieveAPMConfig, err) - return false - } + unit, ok := componentState.Units[ComponentUnitKey{UnitType: client.UnitTypeInput, UnitID: "fake-input"}] + require.True(t, ok, "input unit missing: fake-input") - retrievedApmConfig, err = extractAPMConfigFromActionResult(t, res) - if err != nil { - subscriptionErrCh <- fmt.Errorf("[case %d]: failed to retrieve APM Config from ActionResult %s: %w", - healthIteration, fakecmp.ActionRetrieveAPMConfig, err) - return false - } + if unit.State == client.UnitStateStarting || unit.State == client.UnitStateConfiguring { + // Unit is still starting or reconfiguring, skip to next update + continue STATELOOP + } - return gproto.Equal(modifiedAPMConfig, retrievedApmConfig) - }, 1*time.Second, 100*time.Millisecond, "APM config was not received by component. expected: %s actual: %s", modifiedAPMConfig, retrievedApmConfig) + require.Equal(t, client.UnitStateHealthy, unit.State, "unit isn't healthy: %v", unit.Message) - comp.Component = &proto.Component{ - ApmConfig: nil, - } - m.Update(component.Model{Components: []component.Component{comp}}) - err := <-m.errCh - if err != nil { - subscriptionErrCh <- fmt.Errorf("[case %d]: failed to update component: %w", - healthIteration, err) - return - } + t.Logf("Healthy iteration %d starting at %s", testStep, time.Now()) + switch testStep { + case 0: + // Add an APM config to the component config and send an update. 
+ comp.Component = &proto.Component{ + ApmConfig: initialAPMConfig, + } + m.Update(component.Model{Components: []component.Component{comp}}) + err = <-m.errCh + require.NoError(t, err, "manager Update call must succeed") + + case 1: + // First, check that the APM config set in the previous step is + // visible, if not then we need to wait for a future update + if componentState.Component == nil { + continue STATELOOP + } - case 4: - // In the previous iteration, the (fake) component has received another CheckinExpected - // message to propagate a nil APM configuration. In this iteration we are about to - // retrieve the APM configuration from the same component via the retrieve_apm_config - // action. - assert.Eventuallyf(t, func() bool { - // check the component - res, err := m.PerformAction( - context.Background(), - comp, - comp.Units[0], - fakecmp.ActionRetrieveAPMConfig, - nil) - if err != nil { - subscriptionErrCh <- fmt.Errorf("[case %d]: failed to PerformAction %s: %w", - healthIteration, fakecmp.ActionRetrieveAPMConfig, err) - return false - } + // The APM config has propagated to the component state, now make sure + // it's visible when retrieving via action. + // We use require.Eventually because the new value isn't guaranteed + // to immediately propagate via Action even after it appears in the + // component checkin. - retrievedApmConfig, err = extractAPMConfigFromActionResult(t, res) - if err != nil { - subscriptionErrCh <- fmt.Errorf("[case %d]: failed to retrieve APM Config from ActionResult %s: %w", - healthIteration, fakecmp.ActionRetrieveAPMConfig, err) - return false - } - return retrievedApmConfig == nil - }, 1*time.Second, 100*time.Millisecond, "APM config was not received by component. 
expected: nil actual: %s", retrievedApmConfig) + require.Eventually(t, + func() bool { + retrievedAPMConfig := fetchAPMConfigWithAction(t, ctx, m, comp) + return gproto.Equal(initialAPMConfig, retrievedAPMConfig) + }, + 3*time.Second, + 50*time.Millisecond, + "Updated APM config should be reported by Actions") - doneCh <- struct{}{} - } + // Config matches, we now try updating to a new APM config + comp.Component = &proto.Component{ + ApmConfig: modifiedAPMConfig, + } + m.Update(component.Model{Components: []component.Component{comp}}) + err = <-m.errCh + require.NoError(t, err, "manager Update call must succeed") - case client.UnitStateStarting: - // acceptable + case 2: + require.NotNil(t, componentState.Component, "ApmConfig must not be nil") - case client.UnitStateConfiguring: - // set unit back to healthy, so other cases will run. - comp.Units[0].Config = component.MustExpectedConfig(map[string]interface{}{ - "type": "fake", - "state": int(client.UnitStateHealthy), - "message": "Fake Healthy", - }) + require.Eventually(t, + func() bool { + retrievedAPMConfig := fetchAPMConfigWithAction(t, ctx, m, comp) + return gproto.Equal(modifiedAPMConfig, retrievedAPMConfig) + }, + 3*time.Second, + 50*time.Millisecond, + "Updated APM config should be reported by Actions") - m.Update(component.Model{Components: []component.Component{comp}}) - err := <-m.errCh - if err != nil { - t.Logf("error updating component state to health: %v", err) + // Both configs were reported correctly, now clear the APM config + comp.Component = &proto.Component{ + ApmConfig: nil, + } - subscriptionErrCh <- fmt.Errorf("failed to update component: %w", err) - } + m.Update(component.Model{Components: []component.Component{comp}}) + err = <-m.errCh + require.NoError(t, err, "manager Update call must succeed") - default: - // unexpected state that should not have occurred - subscriptionErrCh <- fmt.Errorf("unit reported unexpected state: %v", - unit.State) + case 3: + if componentState.Component != 
nil && componentState.Component.ApmConfig != nil { + // APM config is still present, wait for next update + continue STATELOOP } + require.Eventually(t, + func() bool { + retrievedAPMConfig := fetchAPMConfigWithAction(t, ctx, m, comp) + return retrievedAPMConfig == nil + }, + 3*time.Second, + 50*time.Millisecond, + "Final APM config should be nil") + + // Success, end the loop + break STATELOOP } + testStep++ } - }() + } - defer drainErrChan(managerErrCh) - defer drainErrChan(subscriptionErrCh) + subCancel() + cancel() - m.Update(component.Model{Components: []component.Component{comp}}) - err = <-m.errCh + err = <-managerErrCh require.NoError(t, err) - - timeout := 30 * time.Second - timeoutTimer := time.NewTimer(timeout) - defer timeoutTimer.Stop() - - // Wait for a success, an error or time out - for { - select { - case <-timeoutTimer.C: - t.Fatalf("timed out after %s", timeout) - case err := <-managerErrCh: - require.NoError(t, err) - case err := <-subscriptionErrCh: - require.NoError(t, err) - case <-doneCh: - subCancel() - cancel() - - err = <-managerErrCh - require.NoError(t, err) - return - } - } } -func extractAPMConfigFromActionResult(t *testing.T, res map[string]interface{}) (*proto.APMConfig, error) { +func fetchAPMConfigWithAction(t *testing.T, ctx context.Context, m *Manager, comp component.Component) *proto.APMConfig { + res, err := m.PerformAction( + context.Background(), + comp, + comp.Units[0], + fakecmp.ActionRetrieveAPMConfig, + nil) + require.NoError(t, err, "failed to retrieve APM config") + apmCfg, ok := res["apm"] - if !ok { - return nil, fmt.Errorf("ActionResult for %s does not contain top level key %s", fakecmp.ActionRetrieveAPMConfig, "apm") - } + require.True(t, ok, "ActionResult must contain top-level 'apm' key") if apmCfg == nil { // the APM config is not set on the component - return nil, nil + return nil } jsonApmConfig, ok := apmCfg.(string) - if !ok { - return nil, fmt.Errorf("ActionResult for %s does not contain a string value: 
%T", fakecmp.ActionRetrieveAPMConfig, apmCfg) - } + require.True(t, ok, "'apm' key must contain a string") retrievedApmConfig := new(proto.APMConfig) - err := protojson.Unmarshal([]byte(jsonApmConfig), retrievedApmConfig) - if err != nil { - return nil, fmt.Errorf("error unmarshaling apmconfig %s: %w", jsonApmConfig, err) - } - return retrievedApmConfig, nil + err = protojson.Unmarshal([]byte(jsonApmConfig), retrievedApmConfig) + require.NoError(t, err, "'apm' key must contain valid json", jsonApmConfig) + return retrievedApmConfig } func TestManager_FakeInput_Limits(t *testing.T) { diff --git a/pkg/control/v2/cproto/control_v2.pb.go b/pkg/control/v2/cproto/control_v2.pb.go index 77a290add56..cdb6456afd0 100644 --- a/pkg/control/v2/cproto/control_v2.pb.go +++ b/pkg/control/v2/cproto/control_v2.pb.go @@ -1166,7 +1166,7 @@ type UpgradeDetailsMetadata struct { // If the upgrade is a scheduled upgrade, the timestamp of when the // upgrade is expected to start. - ScheduledAt *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=scheduled_at,json=scheduledAt,proto3" json:"scheduled_at,omitempty"` + ScheduledAt string `protobuf:"bytes,1,opt,name=scheduled_at,json=scheduledAt,proto3" json:"scheduled_at,omitempty"` // If the upgrade is in the UPG_DOWNLOADING state, the percentage of // the Elastic Agent artifact that has already been downloaded, to // serve as an indicator of download progress. @@ -1175,6 +1175,12 @@ type UpgradeDetailsMetadata struct { FailedState string `protobuf:"bytes,3,opt,name=failed_state,json=failedState,proto3" json:"failed_state,omitempty"` // Any error encountered during the upgrade process. ErrorMsg string `protobuf:"bytes,4,opt,name=error_msg,json=errorMsg,proto3" json:"error_msg,omitempty"` + // Any error message that is a result of a retryable upgrade + // step, e.g. the download step, being retried. 
+ RetryErrorMsg string `protobuf:"bytes,5,opt,name=retry_error_msg,json=retryErrorMsg,proto3" json:"retry_error_msg,omitempty"` + // The deadline until when a retryable upgrade step, e.g. the download + // step, will be retried. + RetryUntil string `protobuf:"bytes,6,opt,name=retry_until,json=retryUntil,proto3" json:"retry_until,omitempty"` } func (x *UpgradeDetailsMetadata) Reset() { @@ -1209,11 +1215,11 @@ func (*UpgradeDetailsMetadata) Descriptor() ([]byte, []int) { return file_control_v2_proto_rawDescGZIP(), []int{11} } -func (x *UpgradeDetailsMetadata) GetScheduledAt() *timestamppb.Timestamp { +func (x *UpgradeDetailsMetadata) GetScheduledAt() string { if x != nil { return x.ScheduledAt } - return nil + return "" } func (x *UpgradeDetailsMetadata) GetDownloadPercent() float32 { @@ -1237,6 +1243,20 @@ func (x *UpgradeDetailsMetadata) GetErrorMsg() string { return "" } +func (x *UpgradeDetailsMetadata) GetRetryErrorMsg() string { + if x != nil { + return x.RetryErrorMsg + } + return "" +} + +func (x *UpgradeDetailsMetadata) GetRetryUntil() string { + if x != nil { + return x.RetryUntil + } + return "" +} + // DiagnosticFileResult is a file result from a diagnostic result. 
type DiagnosticFileResult struct { state protoimpl.MessageState @@ -2016,170 +2036,173 @@ var file_control_v2_proto_rawDesc = []byte{ 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xc2, 0x01, 0x0a, 0x16, 0x55, + 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xef, 0x01, 0x0a, 0x16, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3d, 0x0a, 0x0c, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, - 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, - 0x65, 0x64, 0x41, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, - 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0f, - 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, - 0x21, 0x0a, 0x0c, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x73, 0x67, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x73, 0x67, 0x22, - 0xdf, 0x01, 0x0a, 0x14, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x46, 0x69, - 0x6c, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, - 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, - 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, - 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, - 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x38, 0x0a, 0x09, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, - 0x64, 0x22, 0x6c, 0x0a, 0x16, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x41, - 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x52, 0x0a, 0x12, 0x61, - 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x44, 0x69, 0x61, 0x67, 0x6e, - 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x11, 0x61, 0x64, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x22, - 0xb5, 0x01, 0x0a, 0x1b, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x43, 0x6f, - 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 
0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x42, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, - 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, - 0x6e, 0x74, 0x73, 0x12, 0x52, 0x0a, 0x12, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, - 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, - 0x23, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x61, 0x6c, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x52, 0x11, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, - 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x22, 0x3f, 0x0a, 0x1a, 0x44, 0x69, 0x61, 0x67, 0x6e, - 0x6f, 0x73, 0x74, 0x69, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, - 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6d, - 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x22, 0x51, 0x0a, 0x17, 0x44, 0x69, 0x61, 0x67, - 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, - 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x82, 0x01, 0x0a, 0x15, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x63, 0x68, 0x65, 
0x64, 0x75, 0x6c, + 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x63, 0x68, + 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x41, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x64, 0x6f, 0x77, 0x6e, + 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x02, 0x52, 0x0f, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x65, 0x72, 0x63, + 0x65, 0x6e, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x66, 0x61, 0x69, 0x6c, 0x65, + 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, + 0x6d, 0x73, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x4d, 0x73, 0x67, 0x12, 0x26, 0x0a, 0x0f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x5f, 0x6d, 0x73, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x72, 0x65, + 0x74, 0x72, 0x79, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x73, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x72, + 0x65, 0x74, 0x72, 0x79, 0x5f, 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x72, 0x65, 0x74, 0x72, 0x79, 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x22, 0xdf, 0x01, 0x0a, + 0x14, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x65, 0x52, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x69, 0x6c, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x69, 0x6c, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x5f, 
0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x12, 0x38, 0x0a, 0x09, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x52, 0x09, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x22, 0x6c, + 0x0a, 0x16, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x52, 0x0a, 0x12, 0x61, 0x64, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x41, 0x64, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x11, 0x61, 0x64, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x22, 0xb5, 0x01, 0x0a, + 0x1b, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6f, + 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x0a, + 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 0x69, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, + 0x12, 0x52, 0x0a, 0x12, 0x61, 0x64, 0x64, 0x69, 0x74, 
0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x63, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x52, 0x11, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x22, 0x3f, 0x0a, 0x1a, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, + 0x65, 0x6e, 0x74, 0x49, 0x64, 0x22, 0x51, 0x0a, 0x17, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x36, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, + 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x82, 0x01, 0x0a, 0x15, 0x44, 0x69, 0x61, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, + 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x6e, 0x69, 0x74, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x55, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x75, 0x6e, 0x69, 
0x74, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x6e, 0x69, 0x74, 0x5f, 0x69, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x6e, 0x69, 0x74, 0x49, 0x64, 0x22, 0x4d, 0x0a, + 0x16, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x33, 0x0a, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, - 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6d, - 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x6e, 0x69, 0x74, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x63, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x75, - 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x6e, 0x69, 0x74, 0x5f, - 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x6e, 0x69, 0x74, 0x49, 0x64, - 0x22, 0x4d, 0x0a, 0x16, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, - 0x69, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x33, 0x0a, 0x05, 0x75, 0x6e, - 0x69, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x63, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x22, - 0xd1, 0x01, 0x0a, 0x16, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, - 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, - 0x6d, 0x70, 0x6f, 0x6e, 0x65, 
0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x2d, 0x0a, - 0x09, 0x75, 0x6e, 0x69, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x10, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x6e, 0x69, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x08, 0x75, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, - 0x75, 0x6e, 0x69, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, - 0x6e, 0x69, 0x74, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x36, 0x0a, 0x07, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, - 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x73, 0x22, 0x8e, 0x01, 0x0a, 0x1b, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, - 0x69, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6f, - 0x6e, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x36, 0x0a, 0x07, - 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, - 0x63, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x73, 0x22, 0x4f, 0x0a, 0x17, 0x44, 0x69, 0x61, 
0x67, 0x6e, 0x6f, 0x73, 0x74, - 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x34, 0x0a, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x22, 0xd1, 0x01, 0x0a, + 0x16, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6f, + 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, + 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x6e, + 0x69, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, + 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x08, 0x75, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x6e, 0x69, + 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x6e, 0x69, 0x74, + 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x36, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x46, 0x69, 0x6c, + 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, + 0x22, 0x8e, 0x01, 0x0a, 0x1b, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x43, + 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, + 0x74, 
0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x36, 0x0a, 0x07, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x46, 0x69, + 0x6c, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x73, 0x22, 0x4f, 0x0a, 0x17, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, + 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x05, + 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x63, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, + 0x6e, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x75, 0x6e, 0x69, + 0x74, 0x73, 0x22, 0x2a, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2a, 0x85, + 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x41, 0x52, + 0x54, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, + 0x55, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x48, 0x45, 0x41, 0x4c, 0x54, + 0x48, 0x59, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x47, 0x52, 0x41, 0x44, 0x45, 0x44, + 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0c, + 0x0a, 0x08, 0x53, 0x54, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, + 0x53, 0x54, 0x4f, 0x50, 0x50, 0x45, 0x44, 0x10, 0x06, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x50, 0x47, + 0x52, 0x41, 0x44, 0x49, 0x4e, 0x47, 0x10, 
0x07, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f, 0x4c, 0x4c, + 0x42, 0x41, 0x43, 0x4b, 0x10, 0x08, 0x2a, 0x21, 0x0a, 0x08, 0x55, 0x6e, 0x69, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x49, 0x4e, 0x50, 0x55, 0x54, 0x10, 0x00, 0x12, 0x0a, 0x0a, + 0x06, 0x4f, 0x55, 0x54, 0x50, 0x55, 0x54, 0x10, 0x01, 0x2a, 0x28, 0x0a, 0x0c, 0x41, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, + 0x43, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, + 0x45, 0x10, 0x01, 0x2a, 0x7f, 0x0a, 0x0b, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x4c, 0x4c, 0x4f, 0x43, 0x53, 0x10, 0x00, 0x12, 0x09, + 0x0a, 0x05, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4d, 0x44, + 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x47, 0x4f, 0x52, 0x4f, 0x55, 0x54, + 0x49, 0x4e, 0x45, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x45, 0x41, 0x50, 0x10, 0x04, 0x12, + 0x09, 0x0a, 0x05, 0x4d, 0x55, 0x54, 0x45, 0x58, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x52, + 0x4f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x48, 0x52, 0x45, 0x41, + 0x44, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0x07, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x52, 0x41, + 0x43, 0x45, 0x10, 0x08, 0x2a, 0x26, 0x0a, 0x1b, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x07, 0x0a, 0x03, 0x43, 0x50, 0x55, 0x10, 0x00, 0x32, 0xdf, 0x04, 0x0a, + 0x13, 0x45, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x31, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x17, + 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 
0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, + 0x15, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x65, 0x57, + 0x61, 0x74, 0x63, 0x68, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x1a, 0x15, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x31, 0x0a, 0x07, + 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x3a, 0x0a, 0x07, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x12, 0x16, 0x2e, 0x63, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x67, 0x72, + 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, 0x0a, 0x0f, 0x44, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x1e, + 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, - 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, - 0x75, 0x6e, 0x69, 0x74, 0x73, 0x22, 0x2a, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, - 0x72, 0x65, 0x52, 
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2a, 0x85, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0c, 0x0a, 0x08, 0x53, - 0x54, 0x41, 0x52, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x43, 0x4f, 0x4e, - 0x46, 0x49, 0x47, 0x55, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x48, 0x45, - 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x47, 0x52, 0x41, - 0x44, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, - 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, - 0x0b, 0x0a, 0x07, 0x53, 0x54, 0x4f, 0x50, 0x50, 0x45, 0x44, 0x10, 0x06, 0x12, 0x0d, 0x0a, 0x09, - 0x55, 0x50, 0x47, 0x52, 0x41, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x07, 0x12, 0x0c, 0x0a, 0x08, 0x52, - 0x4f, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x08, 0x2a, 0x21, 0x0a, 0x08, 0x55, 0x6e, 0x69, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x49, 0x4e, 0x50, 0x55, 0x54, 0x10, 0x00, - 0x12, 0x0a, 0x0a, 0x06, 0x4f, 0x55, 0x54, 0x50, 0x55, 0x54, 0x10, 0x01, 0x2a, 0x28, 0x0a, 0x0c, - 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, - 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x41, 0x49, - 0x4c, 0x55, 0x52, 0x45, 0x10, 0x01, 0x2a, 0x7f, 0x0a, 0x0b, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x4c, 0x4c, 0x4f, 0x43, 0x53, 0x10, - 0x00, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, - 0x43, 0x4d, 0x44, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x47, 0x4f, 0x52, - 0x4f, 0x55, 0x54, 0x49, 0x4e, 0x45, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x45, 0x41, 0x50, - 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x55, 0x54, 
0x45, 0x58, 0x10, 0x05, 0x12, 0x0b, 0x0a, - 0x07, 0x50, 0x52, 0x4f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x48, - 0x52, 0x45, 0x41, 0x44, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0x07, 0x12, 0x09, 0x0a, 0x05, - 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, 0x08, 0x2a, 0x26, 0x0a, 0x1b, 0x41, 0x64, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x07, 0x0a, 0x03, 0x43, 0x50, 0x55, 0x10, 0x00, 0x32, - 0xdf, 0x04, 0x0a, 0x13, 0x45, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, - 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x31, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x1a, 0x15, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x57, 0x61, 0x74, 0x63, 0x68, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x15, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, - 0x31, 0x0a, 0x07, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x07, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x12, 0x16, 
0x2e, - 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, - 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, - 0x0a, 0x0f, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, - 0x74, 0x12, 0x1e, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, - 0x6f, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x1f, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, - 0x6f, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x53, 0x0a, 0x0f, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, - 0x55, 0x6e, 0x69, 0x74, 0x73, 0x12, 0x1e, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, - 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, - 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x62, 0x0a, 0x14, 0x44, 0x69, 0x61, 0x67, 0x6e, - 0x6f, 0x73, 0x74, 0x69, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x12, - 0x23, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, - 0x74, 0x69, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, - 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, - 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x34, 0x0a, 0x09, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 
0x75, 0x72, 0x65, 0x12, 0x18, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x42, 0x29, 0x5a, 0x24, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6b, - 0x67, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, - 0x76, 0x32, 0x2f, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x53, 0x0a, 0x0f, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, + 0x74, 0x73, 0x12, 0x1e, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x30, 0x01, 0x12, 0x62, 0x0a, 0x14, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x23, 0x2e, 0x63, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x23, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x34, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x65, 0x12, 0x18, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 
0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x29, + 0x5a, 0x24, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, + 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x76, 0x32, 0x2f, + 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( @@ -2243,41 +2266,40 @@ var file_control_v2_proto_depIdxs = []int32{ 12, // 11: cproto.StateResponse.components:type_name -> cproto.ComponentState 15, // 12: cproto.StateResponse.upgrade_details:type_name -> cproto.UpgradeDetails 16, // 13: cproto.UpgradeDetails.metadata:type_name -> cproto.UpgradeDetailsMetadata - 29, // 14: cproto.UpgradeDetailsMetadata.scheduled_at:type_name -> google.protobuf.Timestamp - 29, // 15: cproto.DiagnosticFileResult.generated:type_name -> google.protobuf.Timestamp - 4, // 16: cproto.DiagnosticAgentRequest.additional_metrics:type_name -> cproto.AdditionalDiagnosticRequest - 20, // 17: cproto.DiagnosticComponentsRequest.components:type_name -> cproto.DiagnosticComponentRequest - 4, // 18: cproto.DiagnosticComponentsRequest.additional_metrics:type_name -> cproto.AdditionalDiagnosticRequest - 17, // 19: cproto.DiagnosticAgentResponse.results:type_name -> cproto.DiagnosticFileResult - 1, // 20: cproto.DiagnosticUnitRequest.unit_type:type_name -> cproto.UnitType - 22, // 21: cproto.DiagnosticUnitsRequest.units:type_name -> cproto.DiagnosticUnitRequest - 1, // 22: cproto.DiagnosticUnitResponse.unit_type:type_name -> cproto.UnitType - 17, // 23: cproto.DiagnosticUnitResponse.results:type_name -> cproto.DiagnosticFileResult - 17, // 24: cproto.DiagnosticComponentResponse.results:type_name -> cproto.DiagnosticFileResult - 24, // 25: cproto.DiagnosticUnitsResponse.units:type_name -> cproto.DiagnosticUnitResponse - 5, // 26: cproto.ElasticAgentControl.Version:input_type -> cproto.Empty - 5, // 
27: cproto.ElasticAgentControl.State:input_type -> cproto.Empty - 5, // 28: cproto.ElasticAgentControl.StateWatch:input_type -> cproto.Empty - 5, // 29: cproto.ElasticAgentControl.Restart:input_type -> cproto.Empty - 8, // 30: cproto.ElasticAgentControl.Upgrade:input_type -> cproto.UpgradeRequest - 18, // 31: cproto.ElasticAgentControl.DiagnosticAgent:input_type -> cproto.DiagnosticAgentRequest - 23, // 32: cproto.ElasticAgentControl.DiagnosticUnits:input_type -> cproto.DiagnosticUnitsRequest - 19, // 33: cproto.ElasticAgentControl.DiagnosticComponents:input_type -> cproto.DiagnosticComponentsRequest - 27, // 34: cproto.ElasticAgentControl.Configure:input_type -> cproto.ConfigureRequest - 6, // 35: cproto.ElasticAgentControl.Version:output_type -> cproto.VersionResponse - 14, // 36: cproto.ElasticAgentControl.State:output_type -> cproto.StateResponse - 14, // 37: cproto.ElasticAgentControl.StateWatch:output_type -> cproto.StateResponse - 7, // 38: cproto.ElasticAgentControl.Restart:output_type -> cproto.RestartResponse - 9, // 39: cproto.ElasticAgentControl.Upgrade:output_type -> cproto.UpgradeResponse - 21, // 40: cproto.ElasticAgentControl.DiagnosticAgent:output_type -> cproto.DiagnosticAgentResponse - 24, // 41: cproto.ElasticAgentControl.DiagnosticUnits:output_type -> cproto.DiagnosticUnitResponse - 25, // 42: cproto.ElasticAgentControl.DiagnosticComponents:output_type -> cproto.DiagnosticComponentResponse - 5, // 43: cproto.ElasticAgentControl.Configure:output_type -> cproto.Empty - 35, // [35:44] is the sub-list for method output_type - 26, // [26:35] is the sub-list for method input_type - 26, // [26:26] is the sub-list for extension type_name - 26, // [26:26] is the sub-list for extension extendee - 0, // [0:26] is the sub-list for field type_name + 29, // 14: cproto.DiagnosticFileResult.generated:type_name -> google.protobuf.Timestamp + 4, // 15: cproto.DiagnosticAgentRequest.additional_metrics:type_name -> cproto.AdditionalDiagnosticRequest + 20, // 16: 
cproto.DiagnosticComponentsRequest.components:type_name -> cproto.DiagnosticComponentRequest + 4, // 17: cproto.DiagnosticComponentsRequest.additional_metrics:type_name -> cproto.AdditionalDiagnosticRequest + 17, // 18: cproto.DiagnosticAgentResponse.results:type_name -> cproto.DiagnosticFileResult + 1, // 19: cproto.DiagnosticUnitRequest.unit_type:type_name -> cproto.UnitType + 22, // 20: cproto.DiagnosticUnitsRequest.units:type_name -> cproto.DiagnosticUnitRequest + 1, // 21: cproto.DiagnosticUnitResponse.unit_type:type_name -> cproto.UnitType + 17, // 22: cproto.DiagnosticUnitResponse.results:type_name -> cproto.DiagnosticFileResult + 17, // 23: cproto.DiagnosticComponentResponse.results:type_name -> cproto.DiagnosticFileResult + 24, // 24: cproto.DiagnosticUnitsResponse.units:type_name -> cproto.DiagnosticUnitResponse + 5, // 25: cproto.ElasticAgentControl.Version:input_type -> cproto.Empty + 5, // 26: cproto.ElasticAgentControl.State:input_type -> cproto.Empty + 5, // 27: cproto.ElasticAgentControl.StateWatch:input_type -> cproto.Empty + 5, // 28: cproto.ElasticAgentControl.Restart:input_type -> cproto.Empty + 8, // 29: cproto.ElasticAgentControl.Upgrade:input_type -> cproto.UpgradeRequest + 18, // 30: cproto.ElasticAgentControl.DiagnosticAgent:input_type -> cproto.DiagnosticAgentRequest + 23, // 31: cproto.ElasticAgentControl.DiagnosticUnits:input_type -> cproto.DiagnosticUnitsRequest + 19, // 32: cproto.ElasticAgentControl.DiagnosticComponents:input_type -> cproto.DiagnosticComponentsRequest + 27, // 33: cproto.ElasticAgentControl.Configure:input_type -> cproto.ConfigureRequest + 6, // 34: cproto.ElasticAgentControl.Version:output_type -> cproto.VersionResponse + 14, // 35: cproto.ElasticAgentControl.State:output_type -> cproto.StateResponse + 14, // 36: cproto.ElasticAgentControl.StateWatch:output_type -> cproto.StateResponse + 7, // 37: cproto.ElasticAgentControl.Restart:output_type -> cproto.RestartResponse + 9, // 38: 
cproto.ElasticAgentControl.Upgrade:output_type -> cproto.UpgradeResponse + 21, // 39: cproto.ElasticAgentControl.DiagnosticAgent:output_type -> cproto.DiagnosticAgentResponse + 24, // 40: cproto.ElasticAgentControl.DiagnosticUnits:output_type -> cproto.DiagnosticUnitResponse + 25, // 41: cproto.ElasticAgentControl.DiagnosticComponents:output_type -> cproto.DiagnosticComponentResponse + 5, // 42: cproto.ElasticAgentControl.Configure:output_type -> cproto.Empty + 34, // [34:43] is the sub-list for method output_type + 25, // [25:34] is the sub-list for method input_type + 25, // [25:25] is the sub-list for extension type_name + 25, // [25:25] is the sub-list for extension extendee + 0, // [0:25] is the sub-list for field type_name } func init() { file_control_v2_proto_init() } diff --git a/pkg/control/v2/server/server.go b/pkg/control/v2/server/server.go index 149c426e2e4..44b1bdfca6a 100644 --- a/pkg/control/v2/server/server.go +++ b/pkg/control/v2/server/server.go @@ -373,12 +373,19 @@ func stateToProto(state *coordinator.State, agentInfo *info.AgentInfo) (*cproto. 
DownloadPercent: float32(state.UpgradeDetails.Metadata.DownloadPercent), FailedState: string(state.UpgradeDetails.Metadata.FailedState), ErrorMsg: state.UpgradeDetails.Metadata.ErrorMsg, + RetryErrorMsg: state.UpgradeDetails.Metadata.RetryErrorMsg, }, } if state.UpgradeDetails.Metadata.ScheduledAt != nil && !state.UpgradeDetails.Metadata.ScheduledAt.IsZero() { - upgradeDetails.Metadata.ScheduledAt = timestamppb.New(*state.UpgradeDetails.Metadata.ScheduledAt) + upgradeDetails.Metadata.ScheduledAt = state.UpgradeDetails.Metadata.ScheduledAt.Format(control.TimeFormat()) + + } + + if state.UpgradeDetails.Metadata.RetryUntil != nil && + !state.UpgradeDetails.Metadata.RetryUntil.IsZero() { + upgradeDetails.Metadata.RetryUntil = state.UpgradeDetails.Metadata.RetryUntil.Format(control.TimeFormat()) } } diff --git a/pkg/control/v2/server/server_test.go b/pkg/control/v2/server/server_test.go index d7aee6d96c7..c001072863a 100644 --- a/pkg/control/v2/server/server_test.go +++ b/pkg/control/v2/server/server_test.go @@ -6,8 +6,7 @@ package server import ( "testing" - - "google.golang.org/protobuf/types/known/timestamppb" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -19,11 +18,12 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/details" "github.com/elastic/elastic-agent/pkg/component" "github.com/elastic/elastic-agent/pkg/component/runtime" + "github.com/elastic/elastic-agent/pkg/control" "github.com/elastic/elastic-agent/pkg/control/v2/cproto" ) func TestStateMapping(t *testing.T) { - + now := time.Now() testcases := []struct { name string agentState cproto.State @@ -62,9 +62,14 @@ func TestStateMapping(t *testing.T) { upgradeDetails: &details.Details{ TargetVersion: "8.13.0", State: details.StateDownloading, - ActionID: "", + ActionID: "some-action-id", Metadata: details.Metadata{ + ScheduledAt: &now, DownloadPercent: 1.7, + ErrorMsg: "some error", + RetryUntil: &now, + RetryErrorMsg: "some retryable 
error", + FailedState: details.StateWatching, }, }, }, @@ -167,11 +172,17 @@ func TestStateMapping(t *testing.T) { DownloadPercent: float32(tc.upgradeDetails.Metadata.DownloadPercent), FailedState: string(tc.upgradeDetails.Metadata.FailedState), ErrorMsg: tc.upgradeDetails.Metadata.ErrorMsg, + RetryErrorMsg: tc.upgradeDetails.Metadata.RetryErrorMsg, } if tc.upgradeDetails.Metadata.ScheduledAt != nil && !tc.upgradeDetails.Metadata.ScheduledAt.IsZero() { - expectedMetadata.ScheduledAt = timestamppb.New(*tc.upgradeDetails.Metadata.ScheduledAt) + expectedMetadata.ScheduledAt = tc.upgradeDetails.Metadata.ScheduledAt.Format(control.TimeFormat()) + } + + if tc.upgradeDetails.Metadata.RetryUntil != nil && + !tc.upgradeDetails.Metadata.RetryUntil.IsZero() { + expectedMetadata.RetryUntil = tc.upgradeDetails.Metadata.RetryUntil.Format(control.TimeFormat()) } assert.Equal(t, string(tc.upgradeDetails.State), stateResponse.UpgradeDetails.State) diff --git a/pkg/testing/define/batch.go b/pkg/testing/define/batch.go index 4efa9f5ad4c..24c71fca4a3 100644 --- a/pkg/testing/define/batch.go +++ b/pkg/testing/define/batch.go @@ -41,15 +41,16 @@ var defaultOS = []OS{ // Batch is a grouping of tests that all have the same requirements. type Batch struct { + // Group must be set on each test to define which group the tests belongs. + // Tests that are in the same group are executed on the same runner. + Group string `json:"group"` + // OS defines the operating systems this test batch needs. OS OS `json:"os"` // Stack defines the stack required for this batch. Stack *Stack `json:"stack,omitempty"` - // Isolate defines that this batch is isolated to a single test. - Isolate bool `json:"isolate"` - // Tests define the set of packages and tests that do not require sudo // privileges to be performed. 
Tests []BatchPackageTests `json:"tests"` @@ -177,15 +178,12 @@ func appendTest(batches []Batch, tar testActionResult, req Requirements) []Batch } for _, o := range set { var batch Batch - batchIdx := -1 - if !req.Isolate { - batchIdx = findBatchIdx(batches, o, req.Stack) - } + batchIdx := findBatchIdx(batches, req.Group, o, req.Stack) if batchIdx == -1 { // new batch required batch = Batch{ + Group: req.Group, OS: o, - Isolate: req.Isolate, Tests: nil, SudoTests: nil, } @@ -241,10 +239,10 @@ func appendPackageTest(tests []BatchPackageTests, pkg string, name string, stack return tests } -func findBatchIdx(batches []Batch, os OS, stack *Stack) int { +func findBatchIdx(batches []Batch, group string, os OS, stack *Stack) int { for i, b := range batches { - if b.Isolate { - // never add to an isolate batch + if b.Group != group { + // must be in the same group continue } if b.OS.Type != os.Type || b.OS.Arch != os.Arch { diff --git a/pkg/testing/define/batch_test.go b/pkg/testing/define/batch_test.go index 0f9afbf3691..a7e265b5f0e 100644 --- a/pkg/testing/define/batch_test.go +++ b/pkg/testing/define/batch_test.go @@ -93,6 +93,7 @@ func TestBatch(t *testing.T) { } expected := []Batch{ { + Group: Default, OS: OS{ Type: Darwin, Arch: AMD64, @@ -101,6 +102,7 @@ func TestBatch(t *testing.T) { SudoTests: darwinSudoTests, }, { + Group: Default, OS: OS{ Type: Darwin, Arch: ARM64, @@ -109,6 +111,7 @@ func TestBatch(t *testing.T) { SudoTests: darwinSudoTests, }, { + Group: Default, OS: OS{ Type: Linux, Arch: AMD64, @@ -117,6 +120,7 @@ func TestBatch(t *testing.T) { SudoTests: linuxSudoTests, }, { + Group: Default, OS: OS{ Type: Linux, Arch: ARM64, @@ -152,6 +156,7 @@ func TestBatch(t *testing.T) { SudoTests: linuxSudoTests, }, { + Group: Default, OS: OS{ Type: Windows, Arch: AMD64, @@ -160,170 +165,47 @@ func TestBatch(t *testing.T) { SudoTests: windowsSudoTests, }, { + Group: "one", OS: OS{ - Type: Darwin, - Arch: AMD64, - }, - Isolate: true, - Tests: []BatchPackageTests{ - { - 
Name: pkgName, - Tests: []BatchPackageTest{ - { - Name: "TestAnyIsolate", - }, - }, - }, - }, - }, - { - OS: OS{ - Type: Darwin, - Arch: ARM64, - }, - Isolate: true, - Tests: []BatchPackageTests{ - { - Name: pkgName, - Tests: []BatchPackageTest{ - { - Name: "TestAnyIsolate", - }, - }, - }, - }, - }, - { - OS: OS{ - Type: Linux, - Arch: AMD64, - }, - Isolate: true, - Tests: []BatchPackageTests{ - { - Name: pkgName, - Tests: []BatchPackageTest{ - { - Name: "TestAnyIsolate", - }, - }, - }, - }, - }, - { - OS: OS{ - Type: Linux, - Arch: ARM64, - }, - Isolate: true, - Tests: []BatchPackageTests{ - { - Name: pkgName, - Tests: []BatchPackageTest{ - { - Name: "TestAnyIsolate", - }, - }, - }, - }, - }, - { - OS: OS{ - Type: Windows, - Arch: AMD64, - }, - Isolate: true, - Tests: []BatchPackageTests{ - { - Name: pkgName, - Tests: []BatchPackageTest{ - { - Name: "TestAnyIsolate", - }, - }, - }, - }, - }, - { - OS: OS{ - Type: Darwin, - Arch: AMD64, - }, - Isolate: true, - Tests: []BatchPackageTests{ - { - Name: pkgName, - Tests: []BatchPackageTest{ - { - Name: "TestDarwinIsolate", - }, - }, - }, + Type: Linux, + Arch: ARM64, + Version: "20.04", + Distro: "ubuntu", }, - }, - { - OS: OS{ - Type: Darwin, - Arch: ARM64, + Stack: &Stack{ + Version: "8.8.0", }, - Isolate: true, Tests: []BatchPackageTests{ { Name: pkgName, Tests: []BatchPackageTest{ { - Name: "TestDarwinIsolate", + Name: "TestGroup_One_One", + Stack: true, }, - }, - }, - }, - }, - { - OS: OS{ - Type: Linux, - Arch: AMD64, - }, - Isolate: true, - Tests: []BatchPackageTests{ - { - Name: pkgName, - Tests: []BatchPackageTest{ { - Name: "TestLinuxIsolate", + Name: "TestGroup_One_Two", + Stack: true, }, }, }, }, }, { + Group: "two", OS: OS{ Type: Linux, Arch: ARM64, }, - Isolate: true, Tests: []BatchPackageTests{ { Name: pkgName, Tests: []BatchPackageTest{ { - Name: "TestLinuxIsolate", + Name: "TestGroup_Two_One", }, - }, - }, - }, - }, - { - OS: OS{ - Type: Windows, - Arch: AMD64, - }, - Isolate: true, - Tests: 
[]BatchPackageTests{ - { - Name: pkgName, - Tests: []BatchPackageTest{ { - Name: "TestWindowsIsolate", + Name: "TestGroup_Two_Two", }, }, }, @@ -344,6 +226,7 @@ var testLinuxLocalTests = []BatchPackageTest{ var testLinuxLocalBatch = []Batch{ { + Group: Default, OS: OS{ Type: "linux", Arch: "amd64", @@ -356,6 +239,7 @@ var testLinuxLocalBatch = []Batch{ }, }, { + Group: Default, OS: OS{ Type: "linux", Arch: "arm64", diff --git a/pkg/testing/define/requirements.go b/pkg/testing/define/requirements.go index 2cfc4a35cb4..c62f874009f 100644 --- a/pkg/testing/define/requirements.go +++ b/pkg/testing/define/requirements.go @@ -11,6 +11,11 @@ import ( "github.com/elastic/elastic-agent/pkg/component" ) +const ( + // Default constant can be used as the default group for tests. + Default = "default" +) + const ( // Darwin is macOS platform Darwin = component.Darwin @@ -82,6 +87,13 @@ type Stack struct { // Requirements defines the testing requirements for the test to run. type Requirements struct { + // Group must be set on each test to define which group the tests belongs to. + // Tests that are in the same group are executed on the same runner. + // + // Useful when tests take a long time to complete and sharding them across multiple + // hosts can improve the total amount of time to complete all the tests. + Group string `json:"group"` + // OS defines the operating systems this test can run on. In the case // multiple are provided the test is ran multiple times one time on each // combination. @@ -97,10 +109,6 @@ type Requirements struct { // when a full test run is performed. Local bool `json:"local"` - // Isolate defines that this test must be isolated to its own dedicated VM and the test - // cannot be shared with other tests. - Isolate bool `json:"isolate"` - // Sudo defines that this test must run under superuser permissions. On Mac and Linux the // test gets executed under sudo and on Windows it gets run under Administrator. 
Sudo bool `json:"sudo"` @@ -108,6 +116,9 @@ type Requirements struct { // Validate returns an error if not valid. func (r Requirements) Validate() error { + if r.Group == "" { + return errors.New("group is required") + } for i, o := range r.OS { if err := o.Validate(); err != nil { return fmt.Errorf("invalid os %d: %w", i, err) diff --git a/pkg/testing/define/testdata/sample_test.go b/pkg/testing/define/testdata/sample_test.go index 51f9e158de8..01095aceaac 100644 --- a/pkg/testing/define/testdata/sample_test.go +++ b/pkg/testing/define/testdata/sample_test.go @@ -14,24 +14,21 @@ import ( func TestAnyLocal(t *testing.T) { define.Require(t, define.Requirements{ + Group: define.Default, Local: true, }) } func TestAnySudo(t *testing.T) { define.Require(t, define.Requirements{ - Sudo: true, - }) -} - -func TestAnyIsolate(t *testing.T) { - define.Require(t, define.Requirements{ - Isolate: true, + Group: define.Default, + Sudo: true, }) } func TestDarwinLocal(t *testing.T) { define.Require(t, define.Requirements{ + Group: define.Default, OS: []define.OS{ { Type: define.Darwin, @@ -43,6 +40,7 @@ func TestDarwinLocal(t *testing.T) { func TestDarwinSudo(t *testing.T) { define.Require(t, define.Requirements{ + Group: define.Default, OS: []define.OS{ { Type: define.Darwin, @@ -52,19 +50,9 @@ func TestDarwinSudo(t *testing.T) { }) } -func TestDarwinIsolate(t *testing.T) { - define.Require(t, define.Requirements{ - OS: []define.OS{ - { - Type: define.Darwin, - }, - }, - Isolate: true, - }) -} - func TestLinuxLocal(t *testing.T) { define.Require(t, define.Requirements{ + Group: define.Default, OS: []define.OS{ { Type: define.Linux, @@ -76,6 +64,7 @@ func TestLinuxLocal(t *testing.T) { func TestLinuxSudo(t *testing.T) { define.Require(t, define.Requirements{ + Group: define.Default, OS: []define.OS{ { Type: define.Linux, @@ -85,52 +74,61 @@ func TestLinuxSudo(t *testing.T) { }) } -func TestLinuxIsolate(t *testing.T) { +func TestWindowsLocal(t *testing.T) { define.Require(t, 
define.Requirements{ + Group: define.Default, OS: []define.OS{ { - Type: define.Linux, + Type: define.Windows, }, }, - Isolate: true, + Local: true, }) } -func TestWindowsLocal(t *testing.T) { +func TestWindowsSudo(t *testing.T) { define.Require(t, define.Requirements{ + Group: define.Default, OS: []define.OS{ { Type: define.Windows, }, }, - Local: true, + Sudo: true, }) } -func TestWindowsSudo(t *testing.T) { +func TestSpecificCombinationOne(t *testing.T) { define.Require(t, define.Requirements{ + Group: define.Default, OS: []define.OS{ { - Type: define.Windows, + Type: define.Linux, + Arch: define.ARM64, + Distro: "ubuntu", + Version: "20.04", }, }, - Sudo: true, }) } -func TestWindowsIsolate(t *testing.T) { +func TestSpecificCombinationTwo(t *testing.T) { define.Require(t, define.Requirements{ + Group: define.Default, OS: []define.OS{ { - Type: define.Windows, + Type: define.Linux, + Arch: define.ARM64, + Distro: "ubuntu", + Version: "20.04", }, }, - Isolate: true, }) } -func TestSpecificCombinationOne(t *testing.T) { +func TestSpecificCombinationWithCloud(t *testing.T) { define.Require(t, define.Requirements{ + Group: define.Default, OS: []define.OS{ { Type: define.Linux, @@ -139,11 +137,15 @@ func TestSpecificCombinationOne(t *testing.T) { Version: "20.04", }, }, + Stack: &define.Stack{ + Version: "8.8.0", + }, }) } -func TestSpecificCombinationTwo(t *testing.T) { +func TestGroup_One_One(t *testing.T) { define.Require(t, define.Requirements{ + Group: "one", OS: []define.OS{ { Type: define.Linux, @@ -152,11 +154,15 @@ func TestSpecificCombinationTwo(t *testing.T) { Version: "20.04", }, }, + Stack: &define.Stack{ + Version: "8.8.0", + }, }) } -func TestSpecificCombinationWithCloud(t *testing.T) { +func TestGroup_One_Two(t *testing.T) { define.Require(t, define.Requirements{ + Group: "one", OS: []define.OS{ { Type: define.Linux, @@ -170,3 +176,27 @@ func TestSpecificCombinationWithCloud(t *testing.T) { }, }) } + +func TestGroup_Two_One(t *testing.T) { + 
define.Require(t, define.Requirements{ + Group: "two", + OS: []define.OS{ + { + Type: define.Linux, + Arch: define.ARM64, + }, + }, + }) +} + +func TestGroup_Two_Two(t *testing.T) { + define.Require(t, define.Requirements{ + Group: "two", + OS: []define.OS{ + { + Type: define.Linux, + Arch: define.ARM64, + }, + }, + }) +} diff --git a/pkg/testing/fixture.go b/pkg/testing/fixture.go index 72c69cdf404..84fa94a3641 100644 --- a/pkg/testing/fixture.go +++ b/pkg/testing/fixture.go @@ -282,6 +282,10 @@ func (f *Fixture) RunBeat(ctx context.Context) error { return errors.New("RunBeat() can't be run against elastic-agent") } + if _, deadlineSet := ctx.Deadline(); !deadlineSet { + f.t.Fatal("Context passed to Fixture.RunBeat() has no deadline set.") + } + var err error err = f.EnsurePrepared(ctx) if err != nil { @@ -366,6 +370,10 @@ func (f *Fixture) RunBeat(ctx context.Context) error { // The `elastic-agent.yml` generated by `Fixture.Configure` is ignored // when `Run` is called. func (f *Fixture) Run(ctx context.Context, states ...State) error { + if _, deadlineSet := ctx.Deadline(); !deadlineSet { + f.t.Fatal("Context passed to Fixture.Run() has no deadline set.") + } + if f.binaryName != "elastic-agent" { return errors.New("Run() can only be used with elastic-agent, use RunBeat()") } diff --git a/pkg/testing/ogc/provisioner.go b/pkg/testing/ogc/provisioner.go index 54054d27bca..696fd90c974 100644 --- a/pkg/testing/ogc/provisioner.go +++ b/pkg/testing/ogc/provisioner.go @@ -9,7 +9,6 @@ import ( "context" "fmt" "os" - "path" "path/filepath" "strings" "time" @@ -296,16 +295,6 @@ func osBatchToOGC(cacheDir string, batch runner.OSBatch) Layout { } else { tags = append(tags, strings.ToLower(fmt.Sprintf("%s-%s", batch.OS.Type, strings.Replace(batch.OS.Version, ".", "-", -1)))) } - if batch.Batch.Isolate { - tags = append(tags, "isolate") - var test define.BatchPackageTests - if len(batch.Batch.SudoTests) > 0 { - test = batch.Batch.SudoTests[0] - } else if 
len(batch.Batch.Tests) > 0 { - test = batch.Batch.Tests[0] - } - tags = append(tags, fmt.Sprintf("%s-%s", path.Base(test.Name), strings.ToLower(test.Tests[0].Name))) - } los, _ := findOSLayout(batch.OS.OS) return Layout{ Name: batch.ID, diff --git a/pkg/testing/runner/config.go b/pkg/testing/runner/config.go index 1448fa197dd..22494ed58e3 100644 --- a/pkg/testing/runner/config.go +++ b/pkg/testing/runner/config.go @@ -32,6 +32,10 @@ type Config struct { // this is used to copy the .tar.gz to the remote host BinaryName string + // Groups filters the tests to only run tests that are part of + // the groups defined in this list. + Groups []string + // Matrix enables matrix testing. This explodes each test to // run on all supported platforms the runner supports. Matrix bool diff --git a/pkg/testing/runner/runner.go b/pkg/testing/runner/runner.go index aab9b695a2b..f6a304c1de5 100644 --- a/pkg/testing/runner/runner.go +++ b/pkg/testing/runner/runner.go @@ -160,11 +160,13 @@ func NewRunner(cfg Config, ip InstanceProvisioner, sp StackProvisioner, batches var osBatches []OSBatch for _, b := range batches { - lbs, err := createBatches(b, platforms, cfg.Matrix) + lbs, err := createBatches(b, platforms, cfg.Groups, cfg.Matrix) if err != nil { return nil, err } - osBatches = append(osBatches, lbs...) + if lbs != nil { + osBatches = append(osBatches, lbs...) 
+ } } if cfg.SingleTest != "" { osBatches, err = filterSingleTest(osBatches, cfg.SingleTest) @@ -888,8 +890,20 @@ func findBatchByID(id string, batches []OSBatch) (OSBatch, bool) { return OSBatch{}, false } -func createBatches(batch define.Batch, platforms []define.OS, matrix bool) ([]OSBatch, error) { +func batchInGroups(batch define.Batch, groups []string) bool { + for _, g := range groups { + if batch.Group == g { + return true + } + } + return false +} + +func createBatches(batch define.Batch, platforms []define.OS, groups []string, matrix bool) ([]OSBatch, error) { var batches []OSBatch + if len(groups) > 0 && !batchInGroups(batch, groups) { + return nil, nil + } specifics, err := getSupported(batch.OS, platforms) if errors.Is(err, ErrOSNotSupported) { var s SupportedOS @@ -1011,16 +1025,7 @@ func createBatchID(batch OSBatch) string { id += "-" + batch.OS.Distro } id += "-" + strings.Replace(batch.OS.Version, ".", "", -1) - if batch.Batch.Isolate { - if len(batch.Batch.Tests) > 0 { - // only ever has one test in an isolated batch - id += "-" + batch.Batch.Tests[0].Tests[0].Name - } - if len(batch.Batch.SudoTests) > 0 { - // only ever has one test in an isolated batch - id += "-" + batch.Batch.SudoTests[0].Tests[0].Name - } - } + id += "-" + strings.Replace(batch.Batch.Group, ".", "", -1) // The batchID needs to be at most 63 characters long otherwise // OGC will fail to instantiate the VM. 
diff --git a/pkg/testing/tools/estools/elasticsearch.go b/pkg/testing/tools/estools/elasticsearch.go index 1c85ed788f3..c87172ff011 100644 --- a/pkg/testing/tools/estools/elasticsearch.go +++ b/pkg/testing/tools/estools/elasticsearch.go @@ -362,7 +362,7 @@ func FindMatchingLogLinesWithContext(ctx context.Context, client elastictranspor return Documents{}, fmt.Errorf("error creating ES query: %w", err) } - return performQueryForRawQuery(ctx, queryRaw, "*ds-logs*", client) + return performQueryForRawQuery(ctx, queryRaw, "logs-elastic_agent*", client) } @@ -434,8 +434,7 @@ func CheckForErrorsInLogsWithContext(ctx context.Context, client elastictranspor return Documents{}, fmt.Errorf("error creating ES query: %w", err) } - return performQueryForRawQuery(ctx, queryRaw, "*ds-logs*", client) - + return performQueryForRawQuery(ctx, queryRaw, "logs-elastic_agent*", client) } // GetLogsForDataset returns any logs associated with the datastream @@ -461,7 +460,7 @@ func GetLogsForAgentID(client elastictransport.Interface, id string) (Documents, es := esapi.New(client) res, err := es.Search( - es.Search.WithIndex("*.ds-logs*"), + es.Search.WithIndex("logs-elastic_agent*"), es.Search.WithExpandWildcards("all"), es.Search.WithBody(&buf), es.Search.WithTrackTotalHits(true), @@ -488,7 +487,7 @@ func GetLogsForDatasetWithContext(ctx context.Context, client elastictransport.I }, } - return performQueryForRawQuery(ctx, indexQuery, "*ds-logs*", client) + return performQueryForRawQuery(ctx, indexQuery, "logs-elastic_agent*", client) } // GetPing performs a basic ping and returns ES config info diff --git a/pkg/testing/tools/testcontext/testcontext.go b/pkg/testing/tools/testcontext/testcontext.go index 02971ef7413..c2ec6bf5619 100644 --- a/pkg/testing/tools/testcontext/testcontext.go +++ b/pkg/testing/tools/testcontext/testcontext.go @@ -22,3 +22,14 @@ func WithDeadline( ctx, cancel := context.WithDeadline(parent, deadline) return ctx, cancel } + +// WithTimeout returns a context with a 
deadline calculated from the provided +// timeout duration. It is the equivalent of calling WithDeadline with the +// deadline specified as time.Now() + timeout. +func WithTimeout( + t *testing.T, + parentCtx context.Context, + timeout time.Duration, +) (context.Context, context.CancelFunc) { + return WithDeadline(t, parentCtx, time.Now().Add(timeout)) +} diff --git a/testing/integration/apm_propagation_test.go b/testing/integration/apm_propagation_test.go index 62aa87b0d83..c1c573773c8 100644 --- a/testing/integration/apm_propagation_test.go +++ b/testing/integration/apm_propagation_test.go @@ -53,6 +53,7 @@ agent.monitoring: func TestAPMConfig(t *testing.T) { info := define.Require(t, define.Requirements{ + Group: Default, Stack: &define.Stack{}, }) f, err := define.NewFixture(t, define.Version()) diff --git a/testing/integration/beats_serverless_test.go b/testing/integration/beats_serverless_test.go index 57123e9142e..730d1d5075e 100644 --- a/testing/integration/beats_serverless_test.go +++ b/testing/integration/beats_serverless_test.go @@ -50,6 +50,7 @@ type BeatRunner struct { func TestBeatsServerless(t *testing.T) { info := define.Require(t, define.Requirements{ + Group: Default, OS: []define.OS{ {Type: define.Linux}, }, diff --git a/testing/integration/diagnostics_test.go b/testing/integration/diagnostics_test.go index 00d8d97aed0..3f5fcdfaf95 100644 --- a/testing/integration/diagnostics_test.go +++ b/testing/integration/diagnostics_test.go @@ -16,6 +16,7 @@ import ( "path/filepath" "strings" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -24,6 +25,7 @@ import ( "github.com/elastic/elastic-agent/pkg/core/process" integrationtest "github.com/elastic/elastic-agent/pkg/testing" "github.com/elastic/elastic-agent/pkg/testing/define" + "github.com/elastic/elastic-agent/pkg/testing/tools/testcontext" ) const diagnosticsArchiveGlobPattern = "elastic-agent-diagnostics-*.zip" @@ -88,13 +90,14 @@ type 
componentAndUnitNames struct { func TestDiagnosticsOptionalValues(t *testing.T) { define.Require(t, define.Requirements{ + Group: Default, Local: false, }) fixture, err := define.NewFixture(t, define.Version()) require.NoError(t, err) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := testcontext.WithDeadline(t, context.Background(), time.Now().Add(10*time.Minute)) defer cancel() err = fixture.Prepare(ctx, fakeComponent, fakeShipper) require.NoError(t, err) @@ -113,20 +116,18 @@ func TestDiagnosticsOptionalValues(t *testing.T) { func TestDiagnosticsCommand(t *testing.T) { define.Require(t, define.Requirements{ + Group: Default, Local: false, }) f, err := define.NewFixture(t, define.Version()) require.NoError(t, err) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := testcontext.WithDeadline(t, context.Background(), time.Now().Add(10*time.Minute)) defer cancel() err = f.Prepare(ctx, fakeComponent, fakeShipper) require.NoError(t, err) - ctx, cancel = context.WithCancel(context.Background()) - defer cancel() - err = f.Run(ctx, integrationtest.State{ Configure: simpleConfig2, AgentState: integrationtest.NewClientState(client.Healthy), diff --git a/testing/integration/endpoint_security_test.go b/testing/integration/endpoint_security_test.go index e58afa72c45..2338cd764e0 100644 --- a/testing/integration/endpoint_security_test.go +++ b/testing/integration/endpoint_security_test.go @@ -74,10 +74,10 @@ var protectionTests = []struct { // test automatically. 
func TestInstallAndCLIUninstallWithEndpointSecurity(t *testing.T) { info := define.Require(t, define.Requirements{ - Stack: &define.Stack{}, - Local: false, // requires Agent installation - Isolate: false, - Sudo: true, // requires Agent installation + Group: Fleet, + Stack: &define.Stack{}, + Local: false, // requires Agent installation + Sudo: true, // requires Agent installation OS: []define.OS{ {Type: define.Linux}, }, @@ -100,10 +100,10 @@ func TestInstallAndCLIUninstallWithEndpointSecurity(t *testing.T) { // but at this point endpoint is already uninstalled. func TestInstallAndUnenrollWithEndpointSecurity(t *testing.T) { info := define.Require(t, define.Requirements{ - Stack: &define.Stack{}, - Local: false, // requires Agent installation - Isolate: false, - Sudo: true, // requires Agent installation + Group: Fleet, + Stack: &define.Stack{}, + Local: false, // requires Agent installation + Sudo: true, // requires Agent installation OS: []define.OS{ {Type: define.Linux}, }, @@ -128,10 +128,10 @@ func TestInstallAndUnenrollWithEndpointSecurity(t *testing.T) { func TestInstallWithEndpointSecurityAndRemoveEndpointIntegration(t *testing.T) { info := define.Require(t, define.Requirements{ - Stack: &define.Stack{}, - Local: false, // requires Agent installation - Isolate: false, - Sudo: true, // requires Agent installation + Group: Fleet, + Stack: &define.Stack{}, + Local: false, // requires Agent installation + Sudo: true, // requires Agent installation OS: []define.OS{ {Type: define.Linux}, }, @@ -241,7 +241,7 @@ func testInstallAndUnenrollWithEndpointSecurity(t *testing.T, info *define.Info, Force: true, } - ctx, cn := context.WithCancel(context.Background()) + ctx, cn := testcontext.WithDeadline(t, context.Background(), time.Now().Add(10*time.Minute)) defer cn() policy, err := tools.InstallAgentWithPolicy(ctx, t, installOpts, fixture, info.KibanaClient, createPolicyReq) @@ -353,7 +353,7 @@ func testInstallWithEndpointSecurityAndRemoveEndpointIntegration(t 
*testing.T, i Force: true, } - ctx, cn := context.WithCancel(context.Background()) + ctx, cn := testcontext.WithDeadline(t, context.Background(), time.Now().Add(10*time.Minute)) defer cn() policy, err := tools.InstallAgentWithPolicy(ctx, t, installOpts, fixture, info.KibanaClient, createPolicyReq) @@ -491,13 +491,13 @@ func installElasticDefendPackage(t *testing.T, info *define.Info, policyID strin // path other than default func TestEndpointSecurityNonDefaultBasePath(t *testing.T) { info := define.Require(t, define.Requirements{ - Stack: &define.Stack{}, - Local: false, // requires Agent installation - Isolate: false, - Sudo: true, // requires Agent installation + Group: Fleet, + Stack: &define.Stack{}, + Local: false, // requires Agent installation + Sudo: true, // requires Agent installation }) - ctx, cn := context.WithCancel(context.Background()) + ctx, cn := testcontext.WithDeadline(t, context.Background(), time.Now().Add(10*time.Minute)) defer cn() // Get path to agent executable. 
@@ -527,7 +527,7 @@ func TestEndpointSecurityNonDefaultBasePath(t *testing.T) { pkgPolicyResp, err := installElasticDefendPackage(t, info, policyResp.ID) require.NoErrorf(t, err, "Policy Response was: %v", pkgPolicyResp) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := testcontext.WithDeadline(t, context.Background(), time.Now().Add(10*time.Minute)) defer cancel() c := fixture.Client() diff --git a/testing/integration/fake_test.go b/testing/integration/fake_test.go index cb685e4cb4a..cf58ba11c4a 100644 --- a/testing/integration/fake_test.go +++ b/testing/integration/fake_test.go @@ -16,6 +16,7 @@ import ( "github.com/elastic/elastic-agent/pkg/control/v2/client" atesting "github.com/elastic/elastic-agent/pkg/testing" "github.com/elastic/elastic-agent/pkg/testing/define" + "github.com/elastic/elastic-agent/pkg/testing/tools/testcontext" ) var simpleConfig1 = ` @@ -44,13 +45,14 @@ inputs: func TestFakeComponent(t *testing.T) { define.Require(t, define.Requirements{ + Group: Default, Local: true, }) f, err := define.NewFixture(t, define.Version()) require.NoError(t, err) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := testcontext.WithDeadline(t, context.Background(), time.Now().Add(10*time.Minute)) defer cancel() err = f.Prepare(ctx, fakeComponent, fakeShipper) require.NoError(t, err) diff --git a/testing/integration/fqdn_test.go b/testing/integration/fqdn_test.go index 9bb1b34de89..6c88a2cffe1 100644 --- a/testing/integration/fqdn_test.go +++ b/testing/integration/fqdn_test.go @@ -27,11 +27,13 @@ import ( "github.com/elastic/elastic-agent/pkg/testing/define" "github.com/elastic/elastic-agent/pkg/testing/tools" "github.com/elastic/elastic-agent/pkg/testing/tools/fleettools" + "github.com/elastic/elastic-agent/pkg/testing/tools/testcontext" "github.com/elastic/go-elasticsearch/v8" ) func TestFQDN(t *testing.T) { info := define.Require(t, define.Requirements{ + Group: Default, // placed in default only because its 
skipped OS: []define.OS{ {Type: define.Linux}, }, @@ -39,7 +41,6 @@ func TestFQDN(t *testing.T) { Local: false, Sudo: true, }) - t.Skip("Flaky test, see https://github.com/elastic/elastic-agent/issues/3154") agentFixture, err := define.NewFixture(t, define.Version()) require.NoError(t, err) @@ -51,11 +52,13 @@ func TestFQDN(t *testing.T) { origEtcHosts, err := getEtcHosts() require.NoError(t, err) + ctx, cancel := testcontext.WithDeadline(t, context.Background(), time.Now().Add(10*time.Minute)) + defer cancel() + // Save original hostname so we can restore it at the end of each test - origHostname, err := getHostname(context.Background()) + origHostname, err := getHostname(ctx) require.NoError(t, err) - ctx := context.Background() kibClient := info.KibanaClient shortName := strings.ToLower(randStr(6)) @@ -92,7 +95,10 @@ func TestFQDN(t *testing.T) { assert.NoError(t, fleettools.UnEnrollAgent(info.KibanaClient, policy.ID)) t.Log("Restoring hostname...") - err := setHostname(context.Background(), origHostname, t.Log) + ctx, cancel := testcontext.WithTimeout(t, context.Background(), 1*time.Minute) + defer cancel() + + err := setHostname(ctx, origHostname, t.Log) require.NoError(t, err) t.Log("Restoring original /etc/hosts...") diff --git a/testing/integration/groups_test.go b/testing/integration/groups_test.go new file mode 100644 index 00000000000..64d8cd7cf02 --- /dev/null +++ b/testing/integration/groups_test.go @@ -0,0 +1,23 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build integration + +package integration + +import "github.com/elastic/elastic-agent/pkg/testing/define" + +const ( + // Default group. + Default = define.Default + + // Fleet group of tests. Used for testing Elastic Agent with Fleet. + Fleet = "fleet" + + // FleetAirgapped group of tests. 
Used for testing Elastic Agent with Fleet and airgapped. + FleetAirgapped = "fleet-airgapped" + + // Upgrade group of tests. Used for testing upgrades. + Upgrade = "upgrade" +) diff --git a/testing/integration/install_test.go b/testing/integration/install_test.go index e2e9d0206cf..8e8407a9dd8 100644 --- a/testing/integration/install_test.go +++ b/testing/integration/install_test.go @@ -18,12 +18,14 @@ import ( atesting "github.com/elastic/elastic-agent/pkg/testing" "github.com/elastic/elastic-agent/pkg/testing/define" + "github.com/elastic/elastic-agent/pkg/testing/tools/testcontext" "github.com/stretchr/testify/require" ) func TestInstallWithoutBasePath(t *testing.T) { define.Require(t, define.Requirements{ + Group: Default, // We require sudo for this test to run // `elastic-agent install`. Sudo: true, @@ -37,8 +39,11 @@ func TestInstallWithoutBasePath(t *testing.T) { fixture, err := define.NewFixture(t, define.Version()) require.NoError(t, err) + ctx, cancel := testcontext.WithDeadline(t, context.Background(), time.Now().Add(10*time.Minute)) + defer cancel() + // Prepare the Elastic Agent so the binary is extracted and ready to use. - err = fixture.Prepare(context.Background()) + err = fixture.Prepare(ctx) require.NoError(t, err) // Check that default base path is clean @@ -58,7 +63,7 @@ func TestInstallWithoutBasePath(t *testing.T) { // Run `elastic-agent install`. We use `--force` to prevent interactive // execution. 
- out, err := fixture.Install(context.Background(), &atesting.InstallOpts{Force: true}) + out, err := fixture.Install(ctx, &atesting.InstallOpts{Force: true}) if err != nil { t.Logf("install output: %s", out) require.NoError(t, err) @@ -66,11 +71,12 @@ func TestInstallWithoutBasePath(t *testing.T) { // Check that Agent was installed in default base path checkInstallSuccess(t, topPath) - t.Run("check agent package version", testAgentPackageVersion(context.Background(), fixture, true)) + t.Run("check agent package version", testAgentPackageVersion(ctx, fixture, true)) } func TestInstallWithBasePath(t *testing.T) { define.Require(t, define.Requirements{ + Group: Default, // We require sudo for this test to run // `elastic-agent install`. Sudo: true, @@ -84,8 +90,11 @@ func TestInstallWithBasePath(t *testing.T) { fixture, err := define.NewFixture(t, define.Version()) require.NoError(t, err) + ctx, cancel := testcontext.WithDeadline(t, context.Background(), time.Now().Add(10*time.Minute)) + defer cancel() + // Prepare the Elastic Agent so the binary is extracted and ready to use. - err = fixture.Prepare(context.Background()) + err = fixture.Prepare(ctx) require.NoError(t, err) // Set up random temporary directory to serve as base path for Elastic Agent @@ -95,7 +104,7 @@ func TestInstallWithBasePath(t *testing.T) { // Run `elastic-agent install`. We use `--force` to prevent interactive // execution. 
- out, err := fixture.Install(context.Background(), &atesting.InstallOpts{ + out, err := fixture.Install(ctx, &atesting.InstallOpts{ BasePath: randomBasePath, Force: true, }) @@ -107,7 +116,7 @@ func TestInstallWithBasePath(t *testing.T) { // Check that Agent was installed in the custom base path topPath := filepath.Join(randomBasePath, "Elastic", "Agent") checkInstallSuccess(t, topPath) - t.Run("check agent package version", testAgentPackageVersion(context.Background(), fixture, true)) + t.Run("check agent package version", testAgentPackageVersion(ctx, fixture, true)) } func checkInstallSuccess(t *testing.T, topPath string) { diff --git a/testing/integration/install_unprivileged_test.go b/testing/integration/install_unprivileged_test.go index 6c260a42de0..a665a306659 100644 --- a/testing/integration/install_unprivileged_test.go +++ b/testing/integration/install_unprivileged_test.go @@ -23,10 +23,12 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/install" atesting "github.com/elastic/elastic-agent/pkg/testing" "github.com/elastic/elastic-agent/pkg/testing/define" + "github.com/elastic/elastic-agent/pkg/testing/tools/testcontext" ) func TestInstallUnprivilegedWithoutBasePath(t *testing.T) { define.Require(t, define.Requirements{ + Group: Default, // We require sudo for this test to run // `elastic-agent install` (even though it will // be installed as non-root). @@ -48,8 +50,11 @@ func TestInstallUnprivilegedWithoutBasePath(t *testing.T) { fixture, err := define.NewFixture(t, define.Version()) require.NoError(t, err) + ctx, cancel := testcontext.WithDeadline(t, context.Background(), time.Now().Add(10*time.Minute)) + defer cancel() + // Prepare the Elastic Agent so the binary is extracted and ready to use. 
- err = fixture.Prepare(context.Background()) + err = fixture.Prepare(ctx) require.NoError(t, err) // Check that default base path is clean @@ -69,7 +74,7 @@ func TestInstallUnprivilegedWithoutBasePath(t *testing.T) { // Run `elastic-agent install`. We use `--force` to prevent interactive // execution. - out, err := fixture.Install(context.Background(), &atesting.InstallOpts{Force: true, Unprivileged: true}) + out, err := fixture.Install(ctx, &atesting.InstallOpts{Force: true, Unprivileged: true}) if err != nil { t.Logf("install output: %s", out) require.NoError(t, err) @@ -80,6 +85,7 @@ func TestInstallUnprivilegedWithoutBasePath(t *testing.T) { func TestInstallUnprivilegedWithBasePath(t *testing.T) { define.Require(t, define.Requirements{ + Group: Default, // We require sudo for this test to run // `elastic-agent install` (even though it will // be installed as non-root). @@ -101,8 +107,11 @@ func TestInstallUnprivilegedWithBasePath(t *testing.T) { fixture, err := define.NewFixture(t, define.Version()) require.NoError(t, err) + ctx, cancel := testcontext.WithDeadline(t, context.Background(), time.Now().Add(10*time.Minute)) + defer cancel() + // Prepare the Elastic Agent so the binary is extracted and ready to use. - err = fixture.Prepare(context.Background()) + err = fixture.Prepare(ctx) require.NoError(t, err) // Other test `TestInstallWithBasePath` uses a random directory for the base @@ -123,7 +132,7 @@ func TestInstallUnprivilegedWithBasePath(t *testing.T) { // Run `elastic-agent install`. We use `--force` to prevent interactive // execution. 
- out, err := fixture.Install(context.Background(), &atesting.InstallOpts{ + out, err := fixture.Install(ctx, &atesting.InstallOpts{ BasePath: basePath, Force: true, Unprivileged: true, diff --git a/testing/integration/logs_ingestion_test.go b/testing/integration/logs_ingestion_test.go index d9fb2f511a8..5227e1026e2 100644 --- a/testing/integration/logs_ingestion_test.go +++ b/testing/integration/logs_ingestion_test.go @@ -33,16 +33,20 @@ import ( "github.com/elastic/elastic-agent/pkg/testing/tools/check" "github.com/elastic/elastic-agent/pkg/testing/tools/estools" "github.com/elastic/elastic-agent/pkg/testing/tools/fleettools" + "github.com/elastic/elastic-agent/pkg/testing/tools/testcontext" "github.com/elastic/elastic-transport-go/v8/elastictransport" ) func TestLogIngestionFleetManaged(t *testing.T) { info := define.Require(t, define.Requirements{ + Group: Fleet, Stack: &define.Stack{}, Local: false, Sudo: true, }) - ctx := context.Background() + + ctx, cancel := testcontext.WithDeadline(t, context.Background(), time.Now().Add(10*time.Minute)) + defer cancel() agentFixture, err := define.NewFixture(t, define.Version()) require.NoError(t, err) @@ -104,15 +108,12 @@ func testMonitoringLogsAreShipped( ) { // Stage 1: Make sure metricbeat logs are populated t.Log("Making sure metricbeat logs are populated") - require.Eventually(t, - func() bool { - docs := findESDocs(t, func() (estools.Documents, error) { - return estools.GetLogsForDataset(info.ESClient, "elastic_agent.metricbeat") - }) - return len(docs.Hits.Hits) > 0 - }, - 1*time.Minute, 500*time.Millisecond, - "there should be metricbeats logs by now") + docs := findESDocs(t, func() (estools.Documents, error) { + return estools.GetLogsForDataset(info.ESClient, "elastic_agent.metricbeat") + }) + t.Logf("metricbeat: Got %d documents", len(docs.Hits.Hits)) + require.NotZero(t, len(docs.Hits.Hits), + "Looking for logs in dataset 'elastic_agent.metricbeat'") // Stage 2: make sure all components are healthy 
t.Log("Making sure all components are healthy") @@ -127,7 +128,7 @@ func testMonitoringLogsAreShipped( // Stage 3: Make sure there are no errors in logs t.Log("Making sure there are no error logs") - docs := findESDocs(t, func() (estools.Documents, error) { + docs = queryESDocs(t, func() (estools.Documents, error) { return estools.CheckForErrorsInLogs(info.ESClient, info.Namespace, []string{ // acceptable error messages (include reason) "Error dialing dial tcp 127.0.0.1:9200: connect: connection refused", // beat is running default config before its config gets updated @@ -136,6 +137,12 @@ func testMonitoringLogsAreShipped( "Failed to initialize artifact", "Failed to apply initial policy from on disk configuration", "elastic-agent-client error: rpc error: code = Canceled desc = context canceled", // can happen on restart + "add_cloud_metadata: received error failed requesting openstack metadata: Get \\\"https://169.254.169.254/2009-04-04/meta-data/instance-id\\\": dial tcp 169.254.169.254:443: connect: connection refused", // okay for the openstack metadata to not work + "add_cloud_metadata: received error failed requesting openstack metadata: Get \\\"https://169.254.169.254/2009-04-04/meta-data/hostname\\\": dial tcp 169.254.169.254:443: connect: connection refused", // okay for the cloud metadata to not work + "add_cloud_metadata: received error failed requesting openstack metadata: Get \\\"https://169.254.169.254/2009-04-04/meta-data/placement/availability-zone\\\": dial tcp 169.254.169.254:443: connect: connection refused", // okay for the cloud metadata to not work + "add_cloud_metadata: received error failed requesting openstack metadata: Get \\\"https://169.254.169.254/2009-04-04/meta-data/instance-type\\\": dial tcp 169.254.169.254:443: connect: connection refused", // okay for the cloud metadata to not work + "add_cloud_metadata: received error failed with http status code 404", // okay for the cloud metadata to not work + "add_cloud_metadata: received 
error failed fetching EC2 Identity Document: operation error ec2imds: GetInstanceIdentityDocument, http response error StatusCode: 404, request to EC2 IMDS failed", // okay for the cloud metadata to not work }) }) t.Logf("error logs: Got %d documents", len(docs.Hits.Hits)) @@ -167,7 +174,6 @@ func testMonitoringLogsAreShipped( // this field is not mapped. There is an issue for that: // https://github.com/elastic/integrations/issues/6545 // TODO: use runtime fields while the above issue is not resolved. - docs = findESDocs(t, func() (estools.Documents, error) { return estools.GetLogsForAgentID(info.ESClient, agentID) }) @@ -197,13 +203,18 @@ func testMonitoringLogsAreShipped( } } -func findESDocs(t *testing.T, findFn func() (estools.Documents, error)) estools.Documents { +// queryESDocs runs `findFn` until it returns no error. Zero documents returned +// is considered a success. +func queryESDocs(t *testing.T, findFn func() (estools.Documents, error)) estools.Documents { var docs estools.Documents require.Eventually( t, func() bool { var err error docs, err = findFn() + if err != nil { + t.Logf("got an error querying ES, retrying. Error: %s", err) + } return err == nil }, 3*time.Minute, @@ -213,6 +224,28 @@ func findESDocs(t *testing.T, findFn func() (estools.Documents, error)) estools. return docs } +// findESDocs runs `findFn` until at least one document is returned and there is no error +func findESDocs(t *testing.T, findFn func() (estools.Documents, error)) estools.Documents { + var docs estools.Documents + require.Eventually( + t, + func() bool { + var err error + docs, err = findFn() + if err != nil { + t.Logf("got an error querying ES, retrying. 
Error: %s", err) + return false + } + + return docs.Hits.Total.Value != 0 + }, + 3*time.Minute, + 15*time.Second, + ) + + return docs +} + func testFlattenedDatastreamFleetPolicy( t *testing.T, ctx context.Context, diff --git a/testing/integration/package_version_test.go b/testing/integration/package_version_test.go index 2b8ca6ebe90..4280603fa97 100644 --- a/testing/integration/package_version_test.go +++ b/testing/integration/package_version_test.go @@ -10,6 +10,7 @@ import ( "context" "os" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -17,18 +18,20 @@ import ( "github.com/elastic/elastic-agent/pkg/control/v2/client" atesting "github.com/elastic/elastic-agent/pkg/testing" "github.com/elastic/elastic-agent/pkg/testing/define" + "github.com/elastic/elastic-agent/pkg/testing/tools/testcontext" "github.com/elastic/elastic-agent/version" ) func TestPackageVersion(t *testing.T) { define.Require(t, define.Requirements{ + Group: Default, Local: true, }) f, err := define.NewFixture(t, define.Version()) require.NoError(t, err) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := testcontext.WithDeadline(t, context.Background(), time.Now().Add(10*time.Minute)) defer cancel() err = f.Prepare(ctx, fakeComponent, fakeShipper) require.NoError(t, err) @@ -92,7 +95,7 @@ func testAfterRemovingPkgVersionFiles(ctx context.Context, f *atesting.Fixture) } testf := func() error { // check the version returned by the running agent - stdout, stderr, processState := getAgentVersionOutput(t, f, context.Background(), false) + stdout, stderr, processState := getAgentVersionOutput(t, f, ctx, false) binaryActualVersion := unmarshalVersionOutput(t, stdout, "binary") assert.Equal(t, version.GetDefaultVersion(), binaryActualVersion, "binary version does not return default beat version when the package version file is missing") diff --git a/testing/integration/pkgversion_common_test.go 
b/testing/integration/pkgversion_common_test.go index 7269afce4c9..a8c4285eabc 100644 --- a/testing/integration/pkgversion_common_test.go +++ b/testing/integration/pkgversion_common_test.go @@ -43,7 +43,7 @@ func testAgentPackageVersion(ctx context.Context, f *integrationtest.Fixture, bi require.NotEmpty(t, pkgVersion, "elastic agent has been packaged with an empty package version") // check the version returned by the running agent - actualVersionBytes := getAgentVersion(t, f, context.Background(), binaryOnly) + actualVersionBytes := getAgentVersion(t, f, ctx, binaryOnly) actualVersion := unmarshalVersionOutput(t, actualVersionBytes, "binary") assert.Equal(t, pkgVersion, actualVersion, "binary version does not match package version") diff --git a/testing/integration/proxy_url_test.go b/testing/integration/proxy_url_test.go index e95469405be..7a6672ae84f 100644 --- a/testing/integration/proxy_url_test.go +++ b/testing/integration/proxy_url_test.go @@ -20,6 +20,7 @@ import ( integrationtest "github.com/elastic/elastic-agent/pkg/testing" "github.com/elastic/elastic-agent/pkg/testing/define" "github.com/elastic/elastic-agent/pkg/testing/tools/check" + "github.com/elastic/elastic-agent/pkg/testing/tools/testcontext" "github.com/elastic/elastic-agent/testing/fleetservertest" "github.com/elastic/elastic-agent/testing/proxytest" "github.com/elastic/elastic-agent/version" @@ -63,7 +64,7 @@ func SetupTest(t *testing.T) *ProxyURL { integrationtest.WithLogOutput()) require.NoError(t, err, "SetupTest: NewFixture failed") - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := testcontext.WithDeadline(t, context.Background(), time.Now().Add(10*time.Minute)) defer cancel() err = f.Prepare(ctx) @@ -79,7 +80,10 @@ func TearDownTest(t *testing.T, p *ProxyURL) { return // nothing to do } - out, err := p.fixture.Uninstall(context.Background(), + ctx, cancel := testcontext.WithDeadline(t, context.Background(), time.Now().Add(10*time.Minute)) + defer cancel() + + 
out, err := p.fixture.Uninstall(ctx, &integrationtest.UninstallOpts{Force: true}) if err != nil && !errors.Is(err, integrationtest.ErrNotInstalled) && @@ -91,6 +95,7 @@ func TearDownTest(t *testing.T, p *ProxyURL) { func TestProxyURL_EnrollProxyAndNoProxyInThePolicy(t *testing.T) { _ = define.Require(t, define.Requirements{ + Group: Fleet, Local: false, Sudo: true, }) @@ -114,8 +119,11 @@ func TestProxyURL_EnrollProxyAndNoProxyInThePolicy(t *testing.T) { action, ) + ctx, cancel := testcontext.WithDeadline(t, context.Background(), time.Now().Add(10*time.Minute)) + defer cancel() + out, err := p.fixture.Install( - context.Background(), + ctx, &integrationtest.InstallOpts{ Force: true, NonInteractive: true, @@ -135,6 +143,7 @@ func TestProxyURL_EnrollProxyAndNoProxyInThePolicy(t *testing.T) { func TestProxyURL_EnrollProxyAndEmptyProxyInThePolicy(t *testing.T) { _ = define.Require(t, define.Requirements{ + Group: Fleet, Local: false, Sudo: true, }) @@ -159,8 +168,12 @@ func TestProxyURL_EnrollProxyAndEmptyProxyInThePolicy(t *testing.T) { 0, action, ) + + ctx, cancel := testcontext.WithDeadline(t, context.Background(), time.Now().Add(10*time.Minute)) + defer cancel() + out, err := p.fixture.Install( - context.Background(), + ctx, &integrationtest.InstallOpts{ Force: true, NonInteractive: true, @@ -180,6 +193,7 @@ func TestProxyURL_EnrollProxyAndEmptyProxyInThePolicy(t *testing.T) { func TestProxyURL_ProxyInThePolicyTakesPrecedence(t *testing.T) { _ = define.Require(t, define.Requirements{ + Group: Fleet, Local: false, Sudo: true, }) @@ -204,8 +218,12 @@ func TestProxyURL_ProxyInThePolicyTakesPrecedence(t *testing.T) { 0, action, ) + + ctx, cancel := testcontext.WithDeadline(t, context.Background(), time.Now().Add(10*time.Minute)) + defer cancel() + out, err := p.fixture.Install( - context.Background(), + ctx, &integrationtest.InstallOpts{ Force: true, NonInteractive: true, @@ -239,6 +257,7 @@ func TestProxyURL_ProxyInThePolicyTakesPrecedence(t *testing.T) { func 
TestProxyURL_NoEnrollProxyAndProxyInThePolicy(t *testing.T) { _ = define.Require(t, define.Requirements{ + Group: Fleet, Local: false, Sudo: true, }) @@ -263,12 +282,16 @@ func TestProxyURL_NoEnrollProxyAndProxyInThePolicy(t *testing.T) { 0, action, ) + + ctx, cancel := testcontext.WithDeadline(t, context.Background(), time.Now().Add(10*time.Minute)) + defer cancel() + t.Logf("fleet: %s, proxy1: %s, proxy2: %s", p.fleet.LocalhostURL, p.proxy1.LocalhostURL, p.proxy2.LocalhostURL) out, err := p.fixture.Install( - context.Background(), + ctx, &integrationtest.InstallOpts{ Force: true, NonInteractive: true, @@ -302,6 +325,7 @@ func TestProxyURL_NoEnrollProxyAndProxyInThePolicy(t *testing.T) { func TestProxyURL_RemoveProxyFromThePolicy(t *testing.T) { _ = define.Require(t, define.Requirements{ + Group: Fleet, Local: false, Sudo: true, }) @@ -326,8 +350,12 @@ func TestProxyURL_RemoveProxyFromThePolicy(t *testing.T) { 0, action, ) + + ctx, cancel := testcontext.WithDeadline(t, context.Background(), time.Now().Add(10*time.Minute)) + defer cancel() + out, err := p.fixture.Install( - context.Background(), + ctx, &integrationtest.InstallOpts{ Force: true, NonInteractive: true, @@ -361,7 +389,7 @@ func TestProxyURL_RemoveProxyFromThePolicy(t *testing.T) { } // Assert the proxy is set on the agent - inspect, err := p.fixture.ExecInspect(context.Background()) + inspect, err := p.fixture.ExecInspect(ctx) require.NoError(t, err) assert.Equal(t, *p.policyData.FleetProxyURL, inspect.Fleet.ProxyURL) @@ -384,7 +412,7 @@ func TestProxyURL_RemoveProxyFromThePolicy(t *testing.T) { return p.checkinWithAcker.Acked(actionIDRemoveProxyFromPolicy) }, 30*time.Second, 5*time.Second) - inspect, err = p.fixture.ExecInspect(context.Background()) + inspect, err = p.fixture.ExecInspect(ctx) require.NoError(t, err) assert.Equal(t, inspect.Fleet.ProxyURL, want) diff --git a/testing/integration/upgrade_broken_package_test.go b/testing/integration/upgrade_broken_package_test.go index 
a9f090d132b..d2cb8c4662b 100644 --- a/testing/integration/upgrade_broken_package_test.go +++ b/testing/integration/upgrade_broken_package_test.go @@ -13,23 +13,26 @@ import ( "os" "path/filepath" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" atesting "github.com/elastic/elastic-agent/pkg/testing" "github.com/elastic/elastic-agent/pkg/testing/define" + "github.com/elastic/elastic-agent/pkg/testing/tools/testcontext" "github.com/elastic/elastic-agent/testing/upgradetest" agtversion "github.com/elastic/elastic-agent/version" ) func TestUpgradeBrokenPackageVersion(t *testing.T) { define.Require(t, define.Requirements{ + Group: Upgrade, Local: false, // requires Agent installation Sudo: true, // requires Agent installation }) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := testcontext.WithDeadline(t, context.Background(), time.Now().Add(10*time.Minute)) defer cancel() // Start at the build version as we want to test the retry diff --git a/testing/integration/upgrade_downgrade_test.go b/testing/integration/upgrade_downgrade_test.go index ef4afd4ae41..0f36e98a99c 100644 --- a/testing/integration/upgrade_downgrade_test.go +++ b/testing/integration/upgrade_downgrade_test.go @@ -10,6 +10,7 @@ import ( "context" "strings" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -17,12 +18,14 @@ import ( atesting "github.com/elastic/elastic-agent/pkg/testing" "github.com/elastic/elastic-agent/pkg/testing/define" "github.com/elastic/elastic-agent/pkg/testing/tools" + "github.com/elastic/elastic-agent/pkg/testing/tools/testcontext" "github.com/elastic/elastic-agent/pkg/version" "github.com/elastic/elastic-agent/testing/upgradetest" ) func TestStandaloneDowngradeToSpecificSnapshotBuild(t *testing.T) { define.Require(t, define.Requirements{ + Group: Upgrade, Local: false, // requires Agent installation Sudo: true, // requires Agent installation }) @@ -35,7 +38,7 @@ 
func TestStandaloneDowngradeToSpecificSnapshotBuild(t *testing.T) { t.Skipf("Version %s is lower than min version %s", define.Version(), minVersion) } - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := testcontext.WithDeadline(t, context.Background(), time.Now().Add(10*time.Minute)) defer cancel() // retrieve all the versions of agent from the artifact API diff --git a/testing/integration/upgrade_fleet_test.go b/testing/integration/upgrade_fleet_test.go index 8d4f3b3c88d..224ee9d811f 100644 --- a/testing/integration/upgrade_fleet_test.go +++ b/testing/integration/upgrade_fleet_test.go @@ -28,7 +28,6 @@ import ( "github.com/elastic/elastic-agent-libs/kibana" atesting "github.com/elastic/elastic-agent/pkg/testing" "github.com/elastic/elastic-agent/pkg/testing/define" - "github.com/elastic/elastic-agent/pkg/testing/tools" "github.com/elastic/elastic-agent/pkg/testing/tools/check" "github.com/elastic/elastic-agent/pkg/testing/tools/fleettools" "github.com/elastic/elastic-agent/pkg/testing/tools/testcontext" @@ -41,6 +40,7 @@ import ( // versions as the standalone tests already perform those tests and would be redundant. func TestFleetManagedUpgrade(t *testing.T) { info := define.Require(t, define.Requirements{ + Group: Fleet, Stack: &define.Stack{}, Local: false, // requires Agent installation Sudo: true, // requires Agent installation @@ -91,20 +91,18 @@ func TestFleetManagedUpgrade(t *testing.T) { func TestFleetAirGappedUpgrade(t *testing.T) { stack := define.Require(t, define.Requirements{ + Group: FleetAirgapped, Stack: &define.Stack{}, // The test uses iptables to simulate the air-gaped environment. - OS: []define.OS{{Type: define.Linux}}, - Isolate: true, // Needed as the test blocks IPs using iptables. 
- Local: false, // Needed as the test requires Agent installation - Sudo: true, // Needed as the test uses iptables and installs the Agent + OS: []define.OS{{Type: define.Linux}}, + Local: false, // Needed as the test requires Agent installation + Sudo: true, // Needed as the test uses iptables and installs the Agent }) ctx, _ := testcontext.WithDeadline( t, context.Background(), time.Now().Add(10*time.Minute)) - artifactAPI := tools.NewArtifactAPIClient() - latest, err := artifactAPI.GetLatestSnapshotVersion(ctx, t) - require.NoError(t, err, "could not fetch latest version from artifacts API") + latest := define.Version() // We need to prepare it first because it'll download the artifact, and it // has to happen before we block the artifacts API IPs. @@ -112,14 +110,14 @@ func TestFleetAirGappedUpgrade(t *testing.T) { // uses it to get some information about the agent version. upgradeTo, err := atesting.NewFixture( t, - latest.String(), + latest, atesting.WithFetcher(atesting.ArtifactFetcher()), ) require.NoError(t, err) err = upgradeTo.Prepare(ctx) require.NoError(t, err) - s := newArtifactsServer(ctx, t, latest.String()) + s := newArtifactsServer(ctx, t, latest) host := "artifacts.elastic.co" simulateAirGapedEnvironment(t, host) diff --git a/testing/integration/upgrade_gpg_test.go b/testing/integration/upgrade_gpg_test.go index e2001dc1eca..d1d9b6f3af1 100644 --- a/testing/integration/upgrade_gpg_test.go +++ b/testing/integration/upgrade_gpg_test.go @@ -10,18 +10,21 @@ import ( "context" "strings" "testing" + "time" "github.com/stretchr/testify/require" "github.com/elastic/elastic-agent/internal/pkg/release" atesting "github.com/elastic/elastic-agent/pkg/testing" "github.com/elastic/elastic-agent/pkg/testing/define" + "github.com/elastic/elastic-agent/pkg/testing/tools/testcontext" "github.com/elastic/elastic-agent/pkg/version" "github.com/elastic/elastic-agent/testing/upgradetest" ) func TestStandaloneUpgradeWithGPGFallback(t *testing.T) { define.Require(t, 
define.Requirements{ + Group: Upgrade, Local: false, // requires Agent installation Sudo: true, // requires Agent installation }) @@ -34,7 +37,7 @@ func TestStandaloneUpgradeWithGPGFallback(t *testing.T) { t.Skipf("Version %s is lower than min version %s", define.Version(), minVersion) } - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := testcontext.WithDeadline(t, context.Background(), time.Now().Add(10*time.Minute)) defer cancel() // Start at the build version as we want to test the retry @@ -77,6 +80,7 @@ func TestStandaloneUpgradeWithGPGFallback(t *testing.T) { func TestStandaloneUpgradeWithGPGFallbackOneRemoteFailing(t *testing.T) { define.Require(t, define.Requirements{ + Group: Upgrade, Local: false, // requires Agent installation Sudo: true, // requires Agent installation }) @@ -89,7 +93,7 @@ func TestStandaloneUpgradeWithGPGFallbackOneRemoteFailing(t *testing.T) { t.Skipf("Version %s is lower than min version %s", define.Version(), minVersion) } - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := testcontext.WithDeadline(t, context.Background(), time.Now().Add(10*time.Minute)) defer cancel() // Start at the build version as we want to test the retry diff --git a/testing/integration/upgrade_rollback_test.go b/testing/integration/upgrade_rollback_test.go index c91ac967df0..3ebd5b336bd 100644 --- a/testing/integration/upgrade_rollback_test.go +++ b/testing/integration/upgrade_rollback_test.go @@ -25,6 +25,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/install" atesting "github.com/elastic/elastic-agent/pkg/testing" "github.com/elastic/elastic-agent/pkg/testing/define" + "github.com/elastic/elastic-agent/pkg/testing/tools/testcontext" "github.com/elastic/elastic-agent/pkg/version" "github.com/elastic/elastic-agent/testing/upgradetest" ) @@ -40,11 +41,12 @@ agent.upgrade.watcher: // that the Agent is rolled back to the previous version. 
func TestStandaloneUpgradeRollback(t *testing.T) { define.Require(t, define.Requirements{ + Group: Upgrade, Local: false, // requires Agent installation Sudo: true, // requires Agent installation }) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := testcontext.WithDeadline(t, context.Background(), time.Now().Add(10*time.Minute)) defer cancel() // Upgrade from an old build because the new watcher from the new build will @@ -154,11 +156,12 @@ inputs: // rolled back to the previous version. func TestStandaloneUpgradeRollbackOnRestarts(t *testing.T) { define.Require(t, define.Requirements{ + Group: Upgrade, Local: false, // requires Agent installation Sudo: true, // requires Agent installation }) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := testcontext.WithDeadline(t, context.Background(), time.Now().Add(10*time.Minute)) defer cancel() // Upgrade from an old build because the new watcher from the new build will diff --git a/testing/integration/upgrade_standalone_inprogress.go b/testing/integration/upgrade_standalone_inprogress.go index adc52cf0872..754a7ba935b 100644 --- a/testing/integration/upgrade_standalone_inprogress.go +++ b/testing/integration/upgrade_standalone_inprogress.go @@ -2,6 +2,8 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build integration + package integration import ( @@ -15,6 +17,7 @@ import ( atesting "github.com/elastic/elastic-agent/pkg/testing" "github.com/elastic/elastic-agent/pkg/testing/define" + "github.com/elastic/elastic-agent/pkg/testing/tools/testcontext" "github.com/elastic/elastic-agent/testing/upgradetest" ) @@ -24,11 +27,12 @@ import ( // the second upgrade. 
func TestStandaloneUpgradeFailsWhenUpgradeIsInProgress(t *testing.T) { define.Require(t, define.Requirements{ + Group: Upgrade, Local: false, // requires Agent installation Sudo: true, // requires Agent installation }) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := testcontext.WithDeadline(t, context.Background(), time.Now().Add(10*time.Minute)) defer cancel() // For this test we start with a version of Agent that's two minors older diff --git a/testing/integration/upgrade_standalone_retry_test.go b/testing/integration/upgrade_standalone_retry_test.go index 00e74b4cef0..d935215e2f4 100644 --- a/testing/integration/upgrade_standalone_retry_test.go +++ b/testing/integration/upgrade_standalone_retry_test.go @@ -15,22 +15,25 @@ import ( "path/filepath" "strings" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" atesting "github.com/elastic/elastic-agent/pkg/testing" "github.com/elastic/elastic-agent/pkg/testing/define" + "github.com/elastic/elastic-agent/pkg/testing/tools/testcontext" "github.com/elastic/elastic-agent/testing/upgradetest" ) func TestStandaloneUpgradeRetryDownload(t *testing.T) { define.Require(t, define.Requirements{ + Group: Upgrade, Local: false, // requires Agent installation Sudo: true, // requires Agent installation }) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := testcontext.WithDeadline(t, context.Background(), time.Now().Add(10*time.Minute)) defer cancel() // Start at the build version as we want to test the retry diff --git a/testing/integration/upgrade_standalone_test.go b/testing/integration/upgrade_standalone_test.go index f1d97bd12b5..4519a6611c3 100644 --- a/testing/integration/upgrade_standalone_test.go +++ b/testing/integration/upgrade_standalone_test.go @@ -10,22 +10,25 @@ import ( "context" "fmt" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" atesting 
"github.com/elastic/elastic-agent/pkg/testing" "github.com/elastic/elastic-agent/pkg/testing/define" + "github.com/elastic/elastic-agent/pkg/testing/tools/testcontext" "github.com/elastic/elastic-agent/testing/upgradetest" ) func TestStandaloneUpgrade(t *testing.T) { define.Require(t, define.Requirements{ + Group: Upgrade, Local: false, // requires Agent installation Sudo: true, // requires Agent installation }) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := testcontext.WithDeadline(t, context.Background(), time.Now().Add(10*time.Minute)) defer cancel() // test 2 current 8.x version and 1 previous 7.x version diff --git a/testing/integration/upgrade_uninstall_test.go b/testing/integration/upgrade_uninstall_test.go index a26a3bb6d1e..2ed3e8d4662 100644 --- a/testing/integration/upgrade_uninstall_test.go +++ b/testing/integration/upgrade_uninstall_test.go @@ -12,6 +12,7 @@ import ( "testing" "time" + "github.com/elastic/elastic-agent/pkg/testing/tools/testcontext" "github.com/elastic/elastic-agent/pkg/version" "github.com/stretchr/testify/assert" @@ -24,6 +25,7 @@ import ( func TestStandaloneUpgradeUninstallKillWatcher(t *testing.T) { define.Require(t, define.Requirements{ + Group: Upgrade, Local: false, // requires Agent installation Sudo: true, // requires Agent installation }) @@ -34,7 +36,7 @@ func TestStandaloneUpgradeUninstallKillWatcher(t *testing.T) { t.Skipf("Version %s is lower than min version %s; test cannot be performed", define.Version(), upgradetest.Version_8_11_0_SNAPSHOT) } - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := testcontext.WithDeadline(t, context.Background(), time.Now().Add(10*time.Minute)) defer cancel() // Start at old version, we want this test to upgrade to our diff --git a/version/version.go b/version/version.go index 9bb9561aa92..eefb327ba2d 100644 --- a/version/version.go +++ b/version/version.go @@ -4,5 +4,5 @@ package version -const defaultBeatVersion = "8.12.0" +const 
defaultBeatVersion = "8.13.0" const Agent = defaultBeatVersion